repo_name | path | copies | size | content | license
---|---|---|---|---|---|
amith01994/intellij-community | plugins/hg4idea/testData/bin/mercurial/setdiscovery.py | 92 | 6894 | # setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <[email protected]>
# and Peter Arrenbrecht <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid
from i18n import _
import random, util, dagutil
def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
# if nodes is empty we scan the entire graph
if nodes:
heads = dag.headsetofconnecteds(nodes)
else:
heads = dag.heads()
dist = {}
visit = util.deque(heads)
seen = set()
factor = 1
while visit:
curr = visit.popleft()
if curr in seen:
continue
d = dist.setdefault(curr, 1)
if d > factor:
factor *= 2
if d == factor:
if curr not in always: # need this check for the early exit below
sample.add(curr)
if quicksamplesize and (len(sample) >= quicksamplesize):
return
seen.add(curr)
for p in dag.parents(curr):
if not nodes or p in nodes:
dist.setdefault(p, d + 1)
visit.append(p)
def _setupsample(dag, nodes, size):
if len(nodes) <= size:
return set(nodes), None, 0
always = dag.headsetofconnecteds(nodes)
desiredlen = size - len(always)
if desiredlen <= 0:
# This could be bad if there are very many heads, all unknown to the
# server. We're counting on long request support here.
return always, None, desiredlen
return always, set(), desiredlen
def _takequicksample(dag, nodes, size, initial):
always, sample, desiredlen = _setupsample(dag, nodes, size)
if sample is None:
return always
if initial:
fromset = None
else:
fromset = nodes
_updatesample(dag, fromset, sample, always, quicksamplesize=desiredlen)
sample.update(always)
return sample
def _takefullsample(dag, nodes, size):
always, sample, desiredlen = _setupsample(dag, nodes, size)
if sample is None:
return always
# update from heads
_updatesample(dag, nodes, sample, always)
# update from roots
_updatesample(dag.inverse(), nodes, sample, always)
assert sample
if len(sample) > desiredlen:
sample = set(random.sample(sample, desiredlen))
elif len(sample) < desiredlen:
more = desiredlen - len(sample)
sample.update(random.sample(list(nodes - sample - always), more))
sample.update(always)
return sample
def findcommonheads(ui, local, remote,
initialsamplesize=100,
fullsamplesize=200,
abortwhenunrelated=True):
'''Return a tuple (common, anyincoming, remoteheads) used to identify
missing nodes from or in remote.
'''
roundtrips = 0
cl = local.changelog
dag = dagutil.revlogdag(cl)
# early exit if we know all the specified remote heads already
ui.debug("query 1; heads\n")
roundtrips += 1
ownheads = dag.heads()
sample = ownheads
if remote.local():
# stopgap until we have a proper localpeer that supports batch()
srvheadhashes = remote.heads()
yesno = remote.known(dag.externalizeall(sample))
elif remote.capable('batch'):
batch = remote.batch()
srvheadhashesref = batch.heads()
yesnoref = batch.known(dag.externalizeall(sample))
batch.submit()
srvheadhashes = srvheadhashesref.value
yesno = yesnoref.value
else:
# compatibility with pre-batch, but post-known remotes during 1.9
# development
srvheadhashes = remote.heads()
sample = []
if cl.tip() == nullid:
if srvheadhashes != [nullid]:
return [nullid], True, srvheadhashes
return [nullid], False, []
# start actual discovery (we note this before the next "if" for
# compatibility reasons)
ui.status(_("searching for changes\n"))
srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
if len(srvheads) == len(srvheadhashes):
ui.debug("all remote heads known locally\n")
return (srvheadhashes, False, srvheadhashes,)
if sample and util.all(yesno):
ui.note(_("all local heads known remotely\n"))
ownheadhashes = dag.externalizeall(ownheads)
return (ownheadhashes, True, srvheadhashes,)
# full blown discovery
# own nodes where I don't know if remote knows them
undecided = dag.nodeset()
# own nodes I know we both know
common = set()
# own nodes I know remote lacks
missing = set()
# treat remote heads (and maybe own heads) as a first implicit sample
# response
common.update(dag.ancestorset(srvheads))
undecided.difference_update(common)
full = False
while undecided:
if sample:
commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
common.update(dag.ancestorset(commoninsample, common))
missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
missing.update(dag.descendantset(missinginsample, missing))
undecided.difference_update(missing)
undecided.difference_update(common)
if not undecided:
break
if full:
ui.note(_("sampling from both directions\n"))
sample = _takefullsample(dag, undecided, size=fullsamplesize)
elif common:
# use cheapish initial sample
ui.debug("taking initial sample\n")
sample = _takefullsample(dag, undecided, size=fullsamplesize)
else:
# use even cheaper initial sample
ui.debug("taking quick initial sample\n")
sample = _takequicksample(dag, undecided, size=initialsamplesize,
initial=True)
roundtrips += 1
ui.progress(_('searching'), roundtrips, unit=_('queries'))
ui.debug("query %i; still undecided: %i, sample size is: %i\n"
% (roundtrips, len(undecided), len(sample)))
# indices between sample and externalized version must match
sample = list(sample)
yesno = remote.known(dag.externalizeall(sample))
full = True
result = dag.headsetofconnecteds(common)
ui.progress(_('searching'), None)
ui.debug("%d total queries\n" % roundtrips)
if not result and srvheadhashes != [nullid]:
if abortwhenunrelated:
raise util.Abort(_("repository is unrelated"))
else:
ui.warn(_("warning: repository is unrelated\n"))
return (set([nullid]), True, srvheadhashes,)
anyincoming = (srvheadhashes != [nullid])
return dag.externalizeall(result), anyincoming, srvheadhashes
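# Editor's sketch (not upstream Mercurial code): how findcommonheads is
# typically driven during pull/push discovery. `ui`, `localrepo` and
# `remotepeer` are hypothetical stand-ins for the usual Mercurial objects.
#
#   common, anyincoming, remoteheads = findcommonheads(
#       ui, localrepo, remotepeer, abortwhenunrelated=True)
#   if anyincoming:
#       # some remote heads are missing locally; pull them on top of `common`
#       localrepo.pull(remotepeer, heads=remoteheads)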
| apache-2.0 |
Saferman/cupper | structure/structure.py | 1 | 1133 | # -*- coding: utf-8 -*-
from lib.color import *
from lib.common import isLinuxSystem
class meaningColor:
def __init__(self):
if isLinuxSystem():
self.getInput_Color = O #'\033[33m'
self.normal_Color = W #'\033[0m'
self.title_Color = P #'\033[31m'
self.explain_Color = B #'\033[34m'
self.choice_Color = B #'\033[34m'
self.error_Color = ''
self.warn_Color = ''
else:
self.getInput_Color = ''
self.normal_Color = ''
self.title_Color = ''
self.explain_Color = ''
self.choice_Color = ''
self.error_Color = ''
self.warn_Color = ''
class PersonalInformation:
def __init__(self):
self.fullnameList = []
self.nicknameList = []
self.dateList = []
self.phoneList = []
self.oldpasswdList = []
self.keynumbersList = []
self.keywordsList = []
self.lovernameList = []
self.organizationList =[]
self.qq =''
self.weakpasswd = 0
self.situation = {}
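# Editor's sketch (hypothetical usage, not upstream cupper code): both classes
# are plain containers consumed by the wordlist generator.
if __name__ == '__main__':
    colors = meaningColor()
    info = PersonalInformation()
    info.fullnameList = ['alice', 'example']   # placeholder data
    info.keynumbersList = ['1987']
    print(colors.title_Color + 'collected: ' + colors.normal_Color +
          str(info.fullnameList + info.keynumbersList))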
| lgpl-3.0 |
py-geek/sankalp-backend | venv/bin/activate_this.py | 1076 | 1137 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
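# Editor's note (illustrative, not part of the generated script): on Python 3,
# where execfile() no longer exists, the documented activation becomes:
#
#   activate = '/path/to/venv/bin/activate_this.py'   # hypothetical path
#   with open(activate) as f:
#       exec(compile(f.read(), activate, 'exec'), dict(__file__=activate))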
| gpl-2.0 |
buildbot/buildbot | master/buildbot/util/lineboundaries.py | 5 | 3125 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from twisted.internet import defer
from buildbot.util.logger import Logger
log = Logger()
class LineBoundaryFinder:
__slots__ = ['partialLine', 'callback', 'warned']
    # Split at a reasonable line length: overly long lines will fill the
    # master's memory and slow down the UI too much.
MAX_LINELENGTH = 4096
# the lookahead here (`(?=.)`) ensures that `\r` doesn't match at the end
# of the buffer
# we also convert cursor control sequence to newlines
# and ugly \b+ (use of backspace to implement progress bar)
newline_re = re.compile(r'(\r\n|\r(?=.)|\033\[u|\033\[[0-9]+;[0-9]+[Hf]|\033\[2J|\x08+)')
def __init__(self, callback):
self.partialLine = None
self.callback = callback
self.warned = False
def append(self, text):
if self.partialLine:
if len(self.partialLine) > self.MAX_LINELENGTH:
if not self.warned:
                    # Unfortunately we cannot give more of a hint as to which log this is
log.warn("Splitting long line: {line_start} {length} "
"(not warning anymore for this log)", line_start=self.partialLine[:30],
length=len(self.partialLine))
self.warned = True
# switch the variables, and return previous _partialLine_,
# split every MAX_LINELENGTH plus a trailing \n
self.partialLine, text = text, self.partialLine
ret = []
while len(text) > self.MAX_LINELENGTH:
ret.append(text[:self.MAX_LINELENGTH])
text = text[self.MAX_LINELENGTH:]
ret.append(text)
return self.callback("\n".join(ret) + "\n")
text = self.partialLine + text
self.partialLine = None
text = self.newline_re.sub('\n', text)
if text:
if text[-1] != '\n':
i = text.rfind('\n')
if i >= 0:
i = i + 1
text, self.partialLine = text[:i], text[i:]
else:
self.partialLine = text
return defer.succeed(None)
return self.callback(text)
return defer.succeed(None)
def flush(self):
if self.partialLine:
return self.append('\n')
return defer.succeed(None)
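# Editor's addition (hedged sketch, not upstream Buildbot code): a minimal
# demonstration of the line-boundary contract -- the callback only ever sees
# whole, newline-terminated chunks.
if __name__ == '__main__':
    received = []
    lbf = LineBoundaryFinder(lambda chunk: defer.succeed(received.append(chunk)))
    lbf.append('partial')                     # buffered, no callback yet
    lbf.append(' line\nsecond line\ntail')    # emits the two complete lines
    lbf.flush()                               # terminates the dangling 'tail'
    assert received == ['partial line\nsecond line\n', 'tail\n']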
| gpl-2.0 |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/Apps/Grey/MailHandler.py | 6 | 9204 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is primarily used by Kamaelia.Apps.ConcreteMailHandler
# It does form the basis of most things that need to handle basic SMTP type things
#
"""\
=========================
Abstract SMTP Mailer Core
=========================
This component effectively forms the skeleton of an SMTP server. It expects
an SMTP client to connect and send various SMTP requests to it. This basic
SMTP Mailer Core however, does not actually do anything in response to any
of the SMTP commands it expects.
Each SMTP command is actually given a dummy callback which more customised
SMTP protocol handlers are expected to override. Beyond this, this component
is expected to be used as a protocol handler for ServerCore.
Fundamentally, this component handles the command/response structure of SMTP
fairly directly, but expects the brains of the protocol to be implemented by
a more intelligent subclass.
Example Usage
-------------
Whilst this will work to a minimal extent::
ServerCore(protocol=MailHandler, port=1025)
This will not actually form a very interesting (nor SMTP-compliant) server,
since whilst it will tell you which commands it doesn't understand, it will
not do anything interesting with the ones it does.
You are as noted expected to subclass MailHandler. For a better example
of how to subclass MailHandler you are suggested to look at
Kamaelia.Apps.ConcreteMailHandler.ConcreteMailHandler
Note
----
This component is not complete - you are expected to subclass it to finish
it off as you need. Specifically it does not implement the following:
- It does not enforce "this command followed by that command"
- It does not actually do anything with any DATA a client sends you
- It neither performs local mail delivery nor proxying - you'd need to implement this yourself.
How does it work?
-----------------
The component is expected to be connected to a client TCP connection by
ServerCore, such that messages from the network arrive on inbox "inbox", and
outgoing messages get sent to outbox "outbox"
The component will terminate if any of these is true:
- The client breaks the connection
- One of the methods sets self.breakConnection to True.
- If a "socketShutdown" message arrives on inbox "control"
The main() method divides the connection into effectively two main states:
- accepting random commands prior to getting a DATA command
- accepting the email during a DATA command
SMTP commands are specifically dispatched to a particular handler for that
command. In this component none of the handlers do anything interesting.
Configuration
-------------
The abstract mailer supports some basic config settings:
- logfile - path/filename where requests should get logged
- debuglogfile - path/filename to where the debug log file should go.
Methods you are expected to override
------------------------------------
Whilst you are probably better off subclassing ConcreteMailHandler, you will
probably need to override the following methods in a subclass if you
subclass MailHandler directly.
- handleConnect(self)
- handleHelo(self,command)
- handleEhlo(self,command)
- handleMail(self,command)
- handleRcpt(self,command)
- handleData(self,command)
- handleQuit(self,command)
- handleRset(self,command)
- handleNoop(self,command)
- handleVrfy(self,command)
- handleHelp(self,command)
- logResult(self)
- handleDisconnect(self)
"""
import Axon
from Axon.Ipc import producerFinished, WaitComplete
from Kamaelia.IPC import socketShutdown
class MailHandler(Axon.Component.component):
logfile = "greylist.log"
debuglogfile = "greylist-debug.log"
def __init__(self,**argd):
super(MailHandler, self).__init__(**argd)
self.inbox_log = []
self.line = None
def logging_recv_connection(self):
self.line = self.recv("inbox")
self.inbox_log.append(self.line)
def getline(self):
control_message = ""
while 1:
while not self.anyReady():
self.pause(); # print "PAUSING", repr(self.inbox_log), repr(self.line)
yield 1
while self.dataReady("control"):
control_message = self.recv("control")
if isinstance(control_message, socketShutdown):
self.client_connected = False
if self.dataReady("inbox"):
self.logging_recv_connection()
return
else:
if not self.client_connected :
self.breakConnection = True
return
yield 1
def handleCommand(self,command):
if len(command) < 1:
self.netPrint("500 Sorry we don't like broken mailers")
self.breakConnection = True
return
if command[0] == "HELO": return self.handleHelo(command) # RFC 2821 4.5.1 required
if command[0] == "EHLO": return self.handleEhlo(command) # RFC 2821 4.5.1 required
if command[0] == "MAIL": return self.handleMail(command) # RFC 2821 4.5.1 required
if command[0] == "RCPT": return self.handleRcpt(command) # RFC 2821 4.5.1 required
if command[0] == "DATA": return self.handleData(command) # RFC 2821 4.5.1 required
if command[0] == "QUIT": return self.handleQuit(command) # RFC 2821 4.5.1 required
if command[0] == "RSET": return self.handleRset(command) # RFC 2821 4.5.1 required
if command[0] == "NOOP": return self.handleNoop(command) # RFC 2821 4.5.1 required
if command[0] == "VRFY": return self.handleVrfy(command) # RFC 2821 4.5.1 required
if command[0] == "HELP": return self.handleHelp(command)
self.netPrint("500 Sorry we don't like broken mailers")
self.breakConnection = True
def noteToLog(self, line):
try:
x = open(self.logfile,"a")
except IOError:
x = open(self.logfile,"w")
x.write(line+"\n")
x.flush()
x.close()
def noteToDebugLog(self, line):
try:
x = open(self.debuglogfile,"a")
except IOError:
x = open(self.debuglogfile,"w")
x.write(line+"\n")
x.flush()
x.close()
def netPrint(self, *args):
for i in args:
self.noteToDebugLog(i)
self.send(i+"\r\n", "outbox")
def handleConnect(self): pass
def handleHelo(self,command): pass
def handleEhlo(self,command): pass
def handleMail(self,command): pass
def handleRcpt(self,command): pass
def handleData(self,command): pass
def handleQuit(self,command): pass
def handleRset(self,command): pass
def handleNoop(self,command): pass
def handleVrfy(self,command): pass
def handleHelp(self,command): pass
def logResult(self): pass
def handleDisconnect(self): yield 1
def lastline(self):
if self.line == ".\r\n":
return True
if len(self.line) >=5:
if self.line[-5:] == "\r\n.\r\n":
return True
if len(self.line) >=4:
if self.line[-4:] == "\n.\r\n":
return True
return False
def main(self):
brokenClient = False
self.handleConnect()
self.gettingdata = False
self.client_connected = True
self.breakConnection = False
while (not self.gettingdata) and (not self.breakConnection):
yield WaitComplete(self.getline(), tag="_getline1")
try:
command = self.line.split()
except AttributeError:
brokenClient = True
break
self.handleCommand(command)
if not brokenClient:
if (not self.breakConnection):
EndOfMessage = False
self.netPrint('354 Enter message, ending with "." on a line by itself')
while not EndOfMessage:
yield WaitComplete(self.getline(), tag="getline2")
if self.lastline():
EndOfMessage = True
self.netPrint("250 OK id-deferred")
self.send(producerFinished(),"signal")
if not brokenClient:
yield WaitComplete(self.handleDisconnect(),tag="_handleDisconnect")
self.logResult()
__kamaelia_components__ = ( MailHandler, )
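# Editor's sketch (hypothetical, not part of Kamaelia): the minimal shape of a
# MailHandler subclass, overriding just enough of the dummy callbacks to greet
# and dismiss a client, as the module docstring suggests.
class GreetingOnlyMailHandler(MailHandler):
    def handleConnect(self):
        self.netPrint("220 mail.example.com ESMTP (demo)")
    def handleHelo(self, command):
        self.netPrint("250 mail.example.com")
    def handleQuit(self, command):
        self.netPrint("221 mail.example.com closing connection")
        self.breakConnection = True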
| apache-2.0 |
jendap/tensorflow | tensorflow/python/ops/distributions/gamma.py | 13 | 12177 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Gamma",
"GammaWithSoftplusConcentrationRate",
]
@tf_export(v1=["distributions.Gamma"])
class Gamma(distribution.Distribution):
"""Gamma distribution.
The Gamma distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
Z = Gamma(alpha) beta**(-alpha)
```
where:
* `concentration = alpha`, `alpha > 0`,
* `rate = beta`, `beta > 0`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The cumulative density function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
```
where `GammaInc` is the [lower incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
concentration = alpha = (mean / stddev)**2
rate = beta = mean / stddev**2 = concentration / mean
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples of this distribution are always non-negative. However,
the samples that are smaller than `np.finfo(dtype).tiny` are rounded
to this value, so it appears more often than it should.
This should only be noticeable when the `concentration` is very small, or the
`rate` is very large. See note in `tf.random_gamma` docstring.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.Gamma(concentration=3.0, rate=2.0)
dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
Compute the gradients of samples w.r.t. the parameters:
```python
concentration = tf.constant(3.0)
rate = tf.constant(2.0)
dist = tfd.Gamma(concentration, rate)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [concentration, rate])
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="Gamma"):
"""Construct Gamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(Gamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(
"""Note: See `tf.random_gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igamma returns the regularized incomplete gamma function,
# which is what we want for the CDF.
return math_ops.igamma(self.concentration, self.rate * x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.xlogy(self.concentration - 1., x) - self.rate * x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
- math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
+ ((1. - self.concentration) *
math_ops.digamma(self.concentration)))
def _mean(self):
return self.concentration / self.rate
def _variance(self):
return self.concentration / math_ops.square(self.rate)
def _stddev(self):
return math_ops.sqrt(self.concentration) / self.rate
@distribution_util.AppendDocstring(
"""The mode of a gamma distribution is `(shape - 1) / rate` when
`shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
an exception will be raised rather than returning `NaN`.""")
def _mode(self):
mode = (self.concentration - 1.) / self.rate
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 1., mode, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="mode not defined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class GammaWithSoftplusConcentrationRate(Gamma):
"""`Gamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2019-01-01",
"Use `tfd.Gamma(tf.nn.softplus(concentration), "
"tf.nn.softplus(rate))` instead.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="GammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(GammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
"""Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
Args:
g0: instance of a Gamma distribution object.
g1: instance of a Gamma distribution object.
name: (optional) Name to use for created operations.
Default is "kl_gamma_gamma".
Returns:
kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
"""
with ops.name_scope(name, "kl_gamma_gamma", values=[
g0.concentration, g0.rate, g1.concentration, g1.rate]):
# Result from:
# http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
# For derivation see:
# http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long
return (((g0.concentration - g1.concentration)
* math_ops.digamma(g0.concentration))
+ math_ops.lgamma(g1.concentration)
- math_ops.lgamma(g0.concentration)
+ g1.concentration * math_ops.log(g0.rate)
- g1.concentration * math_ops.log(g1.rate)
+ g0.concentration * (g1.rate / g0.rate - 1.))
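# Editor's addition (hedged sketch, not upstream TensorFlow code): a quick
# numeric sanity check of the formulas above. With g0 == g1 every term in
# _kl_gamma_gamma cancels, so the divergence is exactly zero; the mean and
# stddev follow the `concentration / rate` identities in the class docstring.
if __name__ == "__main__":
  from tensorflow.python.client import session as _session_lib
  _g = Gamma(concentration=3.0, rate=2.0)
  _kl = kullback_leibler.kl_divergence(_g, _g)
  with _session_lib.Session() as _sess:
    print(_sess.run([_g.mean(), _g.stddev(), _kl]))  # ~[1.5, 0.866, 0.0]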
| apache-2.0 |
jcollado/rabbithole | travis_pypi_setup.py | 1 | 3757 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'jcollado/rabbithole'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
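# Editor's note (illustrative, not part of the script): typical invocation --
#
#   $ python travis_pypi_setup.py --repo jcollado/rabbithole
#
# fetches the repository's public RSA key from the Travis API, encrypts the
# PyPI password typed at the prompt, and rewrites deploy.password in
# .travis.yml in place.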
| mit |
xxsergzzxx/python-for-android | python3-alpha/python3-src/Lib/test/list_tests.py | 53 | 16636 | """
Tests common to list and UserList.UserList
"""
import sys
import os
from functools import cmp_to_key
from test import support, seq_tests
class CommonTest(seq_tests.CommonTest):
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(repr(a2), repr(l2))
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
l0 = []
for i in range(sys.getrecursionlimit() + 100):
l0 = [l0]
self.assertRaises(RuntimeError, repr, l0)
def test_print(self):
d = self.type2test(range(200))
d.append(d)
d.extend(range(200,400))
d.append(d)
d.append(400)
try:
with open(support.TESTFN, "w") as fo:
fo.write(str(d))
with open(support.TESTFN, "r") as fo:
self.assertEqual(fo.read(), repr(d))
finally:
os.remove(support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, next, r)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
# Bug 3689: make sure list-reversed-iterator doesn't have __len__
self.assertRaises(TypeError, len, reversed([1,2,3]))
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0] = 1
a[1] = 2
a[2] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2] = 88
a[-1] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
self.assertRaises(TypeError, a.__setitem__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertIs(x, y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in range(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
if a == b:
return 0
elif a < b:
return 1
else: # a > b
return -1
u.sort(key=cmp_to_key(revcmp))
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
xmod, ymod = x%3, y%7
if xmod == ymod:
return 0
elif xmod < ymod:
return -1
else: # xmod > ymod
return 1
z = self.type2test(range(12))
z.sort(key=cmp_to_key(myComparison))
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
if x == y:
return 0
elif x < y:
return -1
else: # x > y
return 1
self.assertRaises(ValueError, z.sort,
key=cmp_to_key(selfmodifyingComparison))
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super().test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assertIs(u, u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
# test issue7788
a = self.type2test(range(10))
del a[9::1<<333]
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
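# Editor's note (not upstream CPython, but the documented usage pattern): a
# concrete test case mixes this class in and supplies type2test, e.g.:
#
#   from test import list_tests
#   class ListTest(list_tests.CommonTest):
#       type2test = list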
| apache-2.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/deps2git/git_tools.py | 6 | 10469 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import os
import re
import subprocess
import sys
import threading
try:
import git_cache
except ImportError:
for p in os.environ['PATH'].split(os.pathsep):
if (os.path.basename(p) == 'depot_tools' and
os.path.exists(os.path.join(p, 'git_cache.py'))):
sys.path.append(p)
import git_cache
# Show more information about the commands being executed.
VERBOSE = False
# The longest any single subprocess will be allowed to run.
TIMEOUT = 40 * 60
class AbnormalExit(Exception):
pass
class StdioBuffer(object):
def __init__(self, name, out_queue):
self.closed = False
self.line_buffer = cStringIO.StringIO()
self.name = name
self.out_q = out_queue
def write(self, msg):
"""Write into the buffer. Only one thread should call write() at a time."""
assert not self.closed
self.line_buffer.write(msg)
# We can use '\n' instead of os.linesep because universal newlines is
# set to true below.
if '\n' in msg:
      # We can assert that lines has at least 2 items if '\n' is present.
lines = self.line_buffer.getvalue().split('\n')
for line in lines[:-1]:
self.out_q.put('%s> %s' % (self.name, line))
self.line_buffer.close()
self.line_buffer = cStringIO.StringIO()
self.line_buffer.write(lines[-1])
def close(self):
# Empty out the line buffer.
self.write('\n')
self.out_q.put(None)
self.closed = True
def GetStatusOutput(cmd, cwd=None, out_buffer=None):
"""Return (status, output) of executing cmd in a shell."""
if VERBOSE:
print >> sys.stderr, ''
print >> sys.stderr, '[DEBUG] Running "%s"' % cmd
def _thread_main():
thr = threading.current_thread()
thr.status = -1
thr.stdout = ''
thr.stderr = '<timeout>'
try:
if out_buffer:
proc = subprocess.Popen(cmd, shell=True,
cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
buf = proc.stdout.read(1)
if buf == '\r': # We want carriage returns in Linux to be newlines.
buf = '\n'
if not buf:
break
out_buffer.write(buf)
stdout = ''
proc.wait()
out_buffer.close()
else:
proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,
cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(stdout, _) = proc.communicate()
except Exception, e:
thr.status = -1
thr.stdout = ''
thr.stderr = repr(e)
else:
thr.status = proc.returncode
thr.stdout = stdout
thr.stderr = ''
thr = threading.Thread(target=_thread_main)
thr.daemon = True
thr.start()
thr.join(TIMEOUT)
# pylint: disable=E1101
if VERBOSE:
short_output = ' '.join(thr.stdout.splitlines())
short_output = short_output.strip(' \t\n\r')
print >> sys.stderr, (
'[DEBUG] Output: %d, %-60s' % (thr.status, short_output))
return (thr.status, thr.stdout)
def Git(git_repo, command, is_mirror=False, out_buffer=None):
"""Execute a git command within a local git repo."""
if is_mirror:
if git_repo:
cmd = 'git --git-dir=%s %s' % (git_repo, command)
else:
cmd = 'git %s' % command
cwd = None
else:
cmd = 'git %s' % command
cwd = git_repo
(status, output) = GetStatusOutput(cmd, cwd, out_buffer)
# For Abnormal Exit, Windows returns -1, Posix returns 128.
if status in [-1, 128]:
raise AbnormalExit('Failed to run %s. Exited Abnormally. output %s' %
(cmd, output))
elif status != 0:
raise Exception('Failed to run %s. error %d. output %s' % (cmd, status,
output))
return (status, output)
def Clone(git_url, git_repo, is_mirror, out_buffer=None):
"""Clone a repository."""
cmd = 'clone'
if is_mirror:
cmd += ' --mirror'
cmd += ' %s %s' % (git_url, git_repo)
if not is_mirror and not os.path.exists(git_repo):
os.makedirs(git_repo)
return Git(None, cmd, is_mirror=is_mirror, out_buffer=out_buffer)
def PopulateCache(git_url, shallow=False):
  # --shallow by default checks out 10000 revisions, but for really large
# repos like adobe ones, we want significantly less than 10000.
depth = None
if shallow and 'adobe' in git_url:
depth = 10
mirror = git_cache.Mirror(git_url, print_func=lambda *args: None)
mirror.populate(depth=depth, shallow=shallow)
return mirror.mirror_path
def Fetch(git_repo, git_url, is_mirror):
"""Fetch the latest objects for a given git repository."""
# Always update the upstream url
Git(git_repo, 'config remote.origin.url %s' % git_url)
Git(git_repo, 'fetch origin', is_mirror)
def Ping(git_repo, verbose=False):
"""Confirm that a remote repository URL is valid."""
status, stdout = GetStatusOutput('git ls-remote ' + git_repo)
if status != 0 and verbose:
print >> sys.stderr, stdout
return status == 0
def CreateLessThanOrEqualRegex(number):
""" Return a regular expression to test whether an integer less than or equal
to 'number' is present in a given string.
"""
# In three parts, build a regular expression that match any numbers smaller
# than 'number'.
  # For example, 78356 would give a regular expression that looks like:
# Part 1
# (78356| # 78356
# Part 2
# 7835[0-5]| # 78350-78355
# 783[0-4][0-9]| # 78300-78349
# 78[0-2][0-9][0-9]| # 78000-78299
# 7[0-7][0-9][0-9][0-9]| # 70000-77999
# [0-6][0-9][0-9][0-9][0-9]| # 10000-69999
# Part 3
# [0-9][0-9][0-9][0-9]| # 1000-9999
# [0-9][0-9][0-9]| # 100-999
# [0-9][0-9]| # 10-99
# [0-9]) # 0-9
# Part 1: Create an array with all the regexes, as described above.
# Prepopulate it with the number itself.
number = str(number)
expressions = [number]
# Convert the number to a list, so we can translate digits in it to
# expressions.
num_list = list(number)
num_len = len(num_list)
# Part 2: Go through all the digits in the number, starting from the end.
# Each iteration appends a line to 'expressions'.
  for index in range(num_len - 1, -1, -1):
# Convert this digit back to an integer.
digit = int(num_list[index])
# Part 2.1: No processing if this digit is a zero.
if digit == 0:
continue
    # Part 2.2: We replace the current digit X with the range "[0-(X-1)]".
num_list[index] = '[0-%d]' % (digit - 1)
# Part 2.3: We set all following digits to be "[0-9]".
    # Since we just decremented a digit in a more significant position, all
# following digits don't matter. The possible numbers will always be smaller
# than before we decremented.
for next_digit in range(index + 1, num_len):
num_list[next_digit] = '[0-9]'
# Part 2.4: Add this new sub-expression to the list.
expressions.append(''.join(num_list))
# Part 3: We add all the full ranges to match all numbers that are at least
# one order of magnitude smaller than the original numbers.
for index in range(1, num_len):
expressions.append('[0-9]'*index)
# All done. We now have our final regular expression.
regex = '(%s)' % ('|'.join(expressions))
return regex
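# Editor's sketch (illustrative, not upstream deps2git code): anchoring the
# generated pattern shows it accepts exactly the integers <= the input --
#
#   pattern = '^%s$' % CreateLessThanOrEqualRegex(78356)
#   re.match(pattern, '78300')   # matches: 78300 <= 78356
#   re.match(pattern, '78400')   # no match: 78400 > 78356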
class SearchError(Exception):
pass
def _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex):
def _FindRevForCommitish(git_repo, commitish, is_mirror):
_, output = Git(git_repo, 'cat-file commit %s' % commitish, is_mirror)
match = re.match(r'git-svn-id: [^\s@]+@(\d+) \S+$', output.splitlines()[-1])
if match:
return int(match.group(1))
else:
# The last commit isn't from svn, but maybe the repo was converted to pure
# git at some point, so the last svn commit is somewhere farther back.
_, output = Git(
git_repo, ('log -E --grep="^git-svn-id: [^@]*@[0-9]* [A-Za-z0-9-]*$" '
'-1 --format="%%H" %s') % commitish, is_mirror)
assert output, 'no match on %s' % commitish
# Check if svn_rev is newer than the current refspec revision.
try:
found_rev = _FindRevForCommitish(git_repo, refspec, is_mirror)
# Sometimes this fails because it's looking in a branch that hasn't been
# fetched from upstream yet. Let it fetch and try again.
except AbnormalExit:
found_rev = None
if (not found_rev or found_rev < int(svn_rev)) and fetch_url:
if VERBOSE:
print >> sys.stderr, (
'Fetching %s %s [%s < %s]' % (git_repo, refspec, found_rev, svn_rev))
Fetch(git_repo, fetch_url, is_mirror)
found_rev = _FindRevForCommitish(git_repo, refspec, is_mirror)
# Find the first commit matching the given git-svn-id regex.
_, output = Git(
git_repo,
('log -E --grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" '
'-1 --format="%%H" %s') % (regex, refspec),
is_mirror)
output = output.strip()
if not re.match('^[0-9a-fA-F]{40}$', output):
raise SearchError('Cannot find revision %s in %s:%s' % (svn_rev, git_repo,
refspec))
# Check if it actually matched the svn_rev that was requested.
found_rev = _FindRevForCommitish(git_repo, output, is_mirror)
found_msg = svn_rev
if found_rev != int(svn_rev):
found_msg = '%s [actual: %s]' % (svn_rev, found_rev)
print >> sys.stderr, '%s: %s <-> %s' % (git_repo, output, found_msg)
return output
def SearchExact(git_repo, svn_rev, is_mirror, refspec='FETCH_HEAD',
fetch_url=None):
"""Return the Git commit id exactly matching the given SVN revision.
If fetch_url is not None, will update repo if revision is newer."""
regex = str(svn_rev)
return _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex)
def Search(git_repo, svn_rev, is_mirror, refspec='FETCH_HEAD', fetch_url=None):
"""Return the Git commit id fuzzy matching the given SVN revision.
If fetch_url is not None, will update repo if revision is newer."""
regex = CreateLessThanOrEqualRegex(svn_rev)
return _SearchImpl(git_repo, svn_rev, is_mirror, refspec, fetch_url, regex)
| bsd-3-clause |
oliverlee/sympy | sympy/polys/domains/complexfield.py | 93 | 3492 | """Implementation of :class:`ComplexField` class. """
from __future__ import print_function, division
from sympy.core.numbers import Float, I
from sympy.utilities import public
from sympy.polys.domains.field import Field
from sympy.polys.domains.simpledomain import SimpleDomain
from sympy.polys.domains.characteristiczero import CharacteristicZero
from sympy.polys.domains.mpelements import MPContext
from sympy.polys.polyerrors import DomainError, CoercionFailed
@public
class ComplexField(Field, CharacteristicZero, SimpleDomain):
"""Complex numbers up to the given precision. """
rep = 'CC'
is_ComplexField = is_CC = True
is_Exact = False
is_Numerical = True
has_assoc_Ring = False
has_assoc_Field = True
_default_precision = 53
@property
def has_default_precision(self):
return self.precision == self._default_precision
@property
def precision(self):
return self._context.prec
@property
def dps(self):
return self._context.dps
@property
def tolerance(self):
return self._context.tolerance
def __init__(self, prec=_default_precision, dps=None, tol=None):
context = MPContext(prec, dps, tol)
context._parent = self
self._context = context
self.dtype = context.mpc
self.zero = self.dtype(0)
self.one = self.dtype(1)
def __eq__(self, other):
return (isinstance(other, ComplexField)
and self.precision == other.precision
and self.tolerance == other.tolerance)
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.precision, self.tolerance))
def to_sympy(self, element):
"""Convert ``element`` to SymPy number. """
return Float(element.real, self.dps) + I*Float(element.imag, self.dps)
def from_sympy(self, expr):
"""Convert SymPy's number to ``dtype``. """
number = expr.evalf(n=self.dps)
real, imag = number.as_real_imag()
if real.is_Number and imag.is_Number:
return self.dtype(real, imag)
else:
raise CoercionFailed("expected complex number, got %s" % expr)
def from_ZZ_python(self, element, base):
return self.dtype(element)
def from_QQ_python(self, element, base):
return self.dtype(element.numerator) / element.denominator
def from_ZZ_gmpy(self, element, base):
return self.dtype(int(element))
def from_QQ_gmpy(self, element, base):
return self.dtype(int(element.numerator)) / int(element.denominator)
def from_RealField(self, element, base):
return self.dtype(element)
def from_ComplexField(self, element, base):
if self == base:
return element
else:
return self.dtype(element)
def get_ring(self):
"""Returns a ring associated with ``self``. """
raise DomainError("there is no ring associated with %s" % self)
def get_exact(self):
"""Returns an exact domain associated with ``self``. """
raise DomainError("there is no exact domain associated with %s" % self)
def gcd(self, a, b):
"""Returns GCD of ``a`` and ``b``. """
return self.one
def lcm(self, a, b):
"""Returns LCM of ``a`` and ``b``. """
return a*b
def almosteq(self, a, b, tolerance=None):
"""Check if ``a`` and ``b`` are almost equal. """
return self._context.almosteq(a, b, tolerance)
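# Hedged usage sketch (not part of the upstream module): round-trip a SymPy
# number through the domain; guarded so it only runs when executed directly.
if __name__ == '__main__':  # pragma: no cover
    from sympy import Rational
    CC = ComplexField(prec=53)
    z = CC.from_sympy(Rational(1, 2) + 3*I)   # dtype value mpc(0.5, 3.0)
    print(CC.to_sympy(z))                     # 0.5 + 3.0*I
    assert CC.almosteq(z, z + CC.dtype(0, 1e-30), tolerance=1e-10)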
| bsd-3-clause |
lunafeng/django | django/forms/models.py | 72 | 54654 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.virtual_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
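# Hedged usage sketch (not part of upstream Django; the ``Author`` model and
# its field names are assumptions):
#
#   initial = model_to_dict(author, fields=['name', 'email'])
#   form = AuthorForm(initial=initial)
#
# Many-to-many values come back as lists of primary keys, which is the
# initial format ModelMultipleChoiceField expects.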
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_virtual_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages,
opts.field_classes)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, exclude)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.virtual_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
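# Hedged usage sketch (not part of upstream Django; ``ArticleForm`` and its
# fields are assumptions). With commit=False the instance is returned
# unsaved, and m2m data must be saved once the instance has a primary key:
#
#   form = ArticleForm(request.POST)
#   if form.is_valid():
#       article = form.save(commit=False)
#       article.owner = request.user
#       article.save()
#       form.save_m2m()   # only attached after save(commit=False)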
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
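# Hedged usage sketch (not part of upstream Django; the ``Book`` model is an
# assumption). Note that 'fields' or 'exclude' must be passed explicitly:
#
#   BookForm = modelform_factory(Book, fields=['title', 'author'])
#   form = BookForm(instance=book)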
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoFields have editable set to
# True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
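# Hedged usage sketch (not part of upstream Django; the ``Author`` model and
# its fields are assumptions):
#
#   AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=2)
#   formset = AuthorFormSet(request.POST or None,
#                           queryset=Author.objects.all())
#   if formset.is_valid():
#       formset.save()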
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data; we are only
# creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.remote_field.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.remote_field.model == parent_model
or f.remote_field.model in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
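# Hedged usage sketch (not part of upstream Django; ``Author``/``Book`` are
# assumptions, with Book holding a single ForeignKey to Author):
#
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'])
#   formset = BookFormSet(request.POST, instance=author)
#   if formset.is_valid():
#       formset.save()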
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that validates that the given value matches the
parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
# ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
for obj in self.queryset.iterator():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the ModelChoiceIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
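# Hedged usage sketch (not part of upstream Django; ``Author`` and the form
# are assumptions). The queryset is evaluated lazily, on each rendering:
#
#   class ArticleForm(forms.Form):
#       author = ModelChoiceField(
#           queryset=Author.objects.all(),
#           empty_label='(choose an author)',
#       )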
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
required, widget, label, initial, help_text, *args, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
# deduplicate given values to avoid creating many querysets or
# requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
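# Hedged usage sketch (not part of upstream Django; ``Tag`` and the form are
# assumptions). cleaned_data yields a QuerySet of the selected objects:
#
#   class ArticleForm(forms.Form):
#       tags = ModelMultipleChoiceField(queryset=Tag.objects.all(),
#                                       required=False)
#   # form.cleaned_data['tags'] -> QuerySet matching the submitted pks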
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| bsd-3-clause |
ContinuumIO/blaze | blaze/tests/test_interactive.py | 3 | 11788 | import textwrap
import datetime
import pickle
import sys
from types import MethodType
from datashape import dshape
from datashape.util.testing import assert_dshape_equal
import pandas as pd
import pandas.util.testing as tm
import pytest
import numpy as np
from odo import into, append
from odo.backends.csv import CSV
from blaze import discover
from blaze.compute import compute
from blaze.expr import data, symbol
from blaze.interactive import (
concrete_head,
expr_repr,
to_html,
)
from blaze.utils import tmpfile, example
tdata = (('Alice', 100),
('Bob', 200))
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
t = data(tdata, fields=['name', 'amount'])
x = np.ones((2, 2))
def test_len():
assert len(t) == 2
assert len(t.name) == 2
def test_repr():
result = expr_repr(t['name'])
print(result)
assert isinstance(result, str)
assert 'Alice' in result
assert 'Bob' in result
assert '...' not in result
result = expr_repr(t['amount'] + 1)
print(result)
assert '101' in result
t2 = data(tuple((i, i**2) for i in range(100)), fields=['x', 'y'])
assert t2.dshape == dshape('100 * {x: int64, y: int64}')
result = expr_repr(t2)
print(result)
assert len(result.split('\n')) < 20
assert '...' in result
def test_repr_of_scalar():
assert expr_repr(t.amount.sum()) == '300'
def test_mutable_backed_repr():
mutable_backed_table = data([[0]], fields=['col1'])
expr_repr(mutable_backed_table)
def test_dataframe_backed_repr():
df = pd.DataFrame(data=[0], columns=['col1'])
dataframe_backed_table = data(df)
expr_repr(dataframe_backed_table)
def test_dataframe_backed_repr_complex():
df = pd.DataFrame([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
columns=['id', 'name', 'balance'])
t = data(df)
expr_repr(t[t['balance'] < 0])
def test_repr_html_on_no_resources_symbol():
t = symbol('t', '5 * {id: int, name: string, balance: int}')
assert to_html(t) == 't'
def test_expr_repr_empty():
s = expr_repr(t[t.amount > 1e9])
assert isinstance(s, str)
assert 'amount' in s
def test_to_html():
s = to_html(t)
assert s
assert 'Alice' in s
assert '<table' in s
assert to_html(1) == '1'
assert to_html(t.count()) == '2'
def test_to_html_on_arrays():
s = to_html(data(np.ones((2, 2))))
assert '1' in s
assert 'br>' in s
def test_serialization():
t2 = pickle.loads(pickle.dumps(t, protocol=pickle.HIGHEST_PROTOCOL))
assert t.schema == t2.schema
assert t._name == t2._name
def test_table_resource():
with tmpfile('csv') as filename:
ds = dshape('var * {a: int, b: int}')
csv = CSV(filename)
append(csv, [[1, 2], [10, 20]], dshape=ds)
t = data(filename)
assert isinstance(t.data, CSV)
assert into(list, compute(t)) == into(list, csv)
def test_explicit_override_dshape():
ds = dshape("""var * {a: ?float64,
b: ?string,
c: ?float32}""")
# If not overridden, the dshape discovery will return:
# var * {a: int64, b: string, c: int64}.
s = textwrap.dedent("""\
a,b,c
1,x,3
2,y,4
3,z,5
""")
with tmpfile('.csv') as filename:
with open(filename, 'w') as fd:
fd.write(s)
bdf = data(filename, dshape=ds)
assert bdf.dshape == ds
def test_concretehead_failure():
t = symbol('t', 'var * {x:int, y:int}')
d = t[t['x'] > 100]
with pytest.raises(ValueError):
concrete_head(d)
def test_into_np_ndarray_column():
t = data(L, fields=['id', 'name', 'balance'])
expr = t[t.balance < 0].name
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_into_nd_array_selection():
t = data(L, fields=['id', 'name', 'balance'])
expr = t[t['balance'] < 0]
selarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(selarray)
def test_into_nd_array_column_failure():
tble = data(L, fields=['id', 'name', 'balance'])
expr = tble[tble['balance'] < 0]
colarray = into(np.ndarray, expr)
assert len(list(compute(expr))) == len(colarray)
def test_Data_attribute_repr():
t = data(CSV(example('accounts-datetimes.csv')))
result = t.when.day
expected = pd.DataFrame({'when_day': [1, 2, 3, 4, 5]})
assert expr_repr(result) == repr(expected)
def test_can_trivially_create_csv_data():
data(example('iris.csv'))
# in context
with data(example('iris.csv')) as d:
assert d is not None
def test_can_trivially_create_csv_Data_with_unicode():
if sys.version[0] == '2':
assert isinstance(data(example(u'iris.csv')).data, CSV)
def test_can_trivially_create_sqlite_table():
pytest.importorskip('sqlalchemy')
data('sqlite:///'+example('iris.db')+'::iris')
# in context
with data('sqlite:///'+example('iris.db')+'::iris') as d:
assert d is not None
@pytest.mark.xfail(sys.platform != 'darwin', reason="h5py/pytables mismatch")
@pytest.mark.skipif(sys.version_info[:2] == (3, 4) and sys.platform == 'win32',
reason='PyTables + Windows + Python 3.4 crashes')
def test_can_trivially_create_pytables():
pytest.importorskip('tables')
with data(example('accounts.h5')+'::/accounts') as d:
assert d is not None
def test_data_passes_kwargs_to_resource():
assert data(example('iris.csv'), encoding='ascii').data.encoding == 'ascii'
def test_data_on_iterator_refies_data():
tdata = [1, 2, 3]
d = data(iter(tdata))
assert into(list, d) == tdata
assert into(list, d) == tdata
# in context
with data(iter(tdata)) as d:
assert d is not None
def test_Data_on_json_is_concrete():
d = data(example('accounts-streaming.json'))
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
assert compute(d.amount.sum()) == 100 - 200 + 300 + 400 - 500
def test_repr_on_nd_array_doesnt_err():
d = data(np.ones((2, 2, 2)))
expr_repr(d + 1)
def test_generator_reprs_concretely():
x = [1, 2, 3, 4, 5, 6]
d = data(x)
expr = d[d > 2] + 1
assert '4' in expr_repr(expr)
def test___array__():
x = np.ones(4)
d = data(x)
assert (np.array(d + 1) == x + 1).all()
d = data(x[:2])
x[2:] = d + 1
assert x.tolist() == [1, 1, 2, 2]
def test_python_scalar_protocols():
d = data(1)
assert int(d + 1) == 2
assert float(d + 1.0) == 2.0
assert bool(d > 0) is True
assert complex(d + 1.0j) == 1 + 1.0j
@pytest.mark.xfail(
reason="DataFrame constructor doesn't yet support __array__"
)
def test_DataFrame():
x = np.array([(1, 2), (1., 2.)], dtype=[('a', 'i4'), ('b', 'f4')])
d = data(x)
assert isinstance(pd.DataFrame(d), pd.DataFrame)
def test_head_compute():
tdata = tm.makeMixedDataFrame()
t = symbol('t', discover(tdata))
db = into('sqlite:///:memory:::t', tdata, dshape=t.dshape)
n = 2
d = data(db)
# skip the header and the ... at the end of the repr
expr = d.head(n)
s = expr_repr(expr)
assert '...' not in s
result = s.split('\n')[1:]
assert len(result) == n
def test_scalar_sql_compute():
t = into('sqlite:///:memory:::t', tdata,
dshape=dshape('var * {name: string, amount: int}'))
d = data(t)
assert expr_repr(d.amount.sum()) == '300'
def test_no_name_for_simple_data():
d = data([1, 2, 3])
assert expr_repr(d) == ' \n0 1\n1 2\n2 3'
assert not d._name
d = data(1)
assert not d._name
assert expr_repr(d) == '1'
def test_coerce_date_and_datetime():
x = datetime.datetime.now().date()
d = data(x)
assert expr_repr(d) == repr(x)
x = pd.Timestamp.now()
d = data(x)
assert expr_repr(d) == repr(x)
x = np.nan
d = data(x, dshape='datetime')
assert expr_repr(d) == repr(pd.NaT)
x = float('nan')
d = data(x, dshape='datetime')
assert expr_repr(d) == repr(pd.NaT)
def test_coerce_timedelta():
x = datetime.timedelta(days=1, hours=2, minutes=3)
d = data(x)
assert expr_repr(d) == repr(x)
def test_highly_nested_repr():
tdata = [[0, [[1, 2], [3]], 'abc']]
d = data(tdata)
assert 'abc' in expr_repr(d.head())
def test_asarray_fails_on_different_column_names():
vs = {'first': [2., 5., 3.],
'second': [4., 1., 4.],
'third': [6., 4., 3.]}
df = pd.DataFrame(vs)
with pytest.raises(ValueError) as excinfo:
data(df, fields=list('abc'))
inmsg = ("data(data_source).relabel(first='a', second='b', third='c')"
" to rename")
assert inmsg in str(excinfo.value)
def test_functions_as_bound_methods():
"""
Test that all functions on a _Data object are instance methods
of that object.
"""
# Filter out __class__ and friends that are special; these can be
# callables without being instance methods.
callable_attrs = filter(
callable, (
getattr(t, a, None)
for a in dir(t)
if not (a.startswith('__') or a.endswith('__'))
),
)
for attr in callable_attrs:
assert isinstance(attr, MethodType)
# Make sure this is bound to the correct object.
assert attr.__self__ is t or attr.__self__ is type(t) # classmethod
def test_all_string_infer_header():
sdata = """x,tl,z
Be careful driving.,hy,en
Be careful.,hy,en
Can you translate this for me?,hy,en
Chicago is very different from Boston.,hy,en
Don't worry.,hy,en"""
with tmpfile('.csv') as fn:
with open(fn, 'w') as f:
f.write(sdata)
tdata = data(fn, has_header=True)
assert tdata.data.has_header
assert tdata.fields == ['x', 'tl', 'z']
def test_csv_with_trailing_commas():
with tmpfile('.csv') as fn:
with open(fn, 'wt') as f:
# note the trailing space in the header
f.write('a,b,c, \n1, 2, 3, ')
csv = CSV(fn)
assert expr_repr(data(fn))
assert discover(csv).measure.names == [
'a', 'b', 'c', ''
]
with tmpfile('.csv') as fn:
with open(fn, 'wt') as f:
f.write('a,b,c,\n1, 2, 3, ') # NO trailing space in the header
csv = CSV(fn)
assert expr_repr(data(fn))
assert discover(csv).measure.names == [
'a', 'b', 'c', 'Unnamed: 3'
]
def test_pickle_roundtrip():
ds = data(1)
assert ds.isidentical(
pickle.loads(pickle.dumps(ds, protocol=pickle.HIGHEST_PROTOCOL)),
)
assert (ds + 1).isidentical(
pickle.loads(pickle.dumps(ds + 1, protocol=pickle.HIGHEST_PROTOCOL)),
)
es = data(np.array([1, 2, 3]))
rs = pickle.loads(pickle.dumps(es, protocol=pickle.HIGHEST_PROTOCOL))
assert (es.data == rs.data).all()
assert_dshape_equal(es.dshape, rs.dshape)
def test_nameless_data():
tdata = [('a', 1)]
assert repr(tdata) in expr_repr(data(tdata))
def test_partially_bound_expr():
df = pd.DataFrame([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
columns=['id', 'name', 'balance'])
tdata = data(df, name='data')
a = symbol('a', 'int')
expr = tdata.name[tdata.balance > a]
assert expr_repr(expr) == 'data[data.balance > a].name'
| bsd-3-clause |
baylee/django | tests/forms_tests/widget_tests/test_clearablefileinput.py | 8 | 5038 | from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ClearableFileInput
from django.utils.encoding import python_2_unicode_compatible
from .base import WidgetTest
@python_2_unicode_compatible
class FakeFieldFile(object):
"""
Quacks like a FieldFile (has a .url and unicode representation), but
doesn't require us to care about storages etc.
"""
url = 'something'
def __str__(self):
return self.url
class ClearableFileInputTest(WidgetTest):
widget = ClearableFileInput()
def test_clear_input_renders(self):
"""
A ClearableFileInput with is_required False and rendered with an
initial value that is a file renders a clear checkbox.
"""
self.check_html(self.widget, 'myfile', FakeFieldFile(), html=(
"""
Currently: <a href="something">something</a>
<input type="checkbox" name="myfile-clear" id="myfile-clear_id" />
<label for="myfile-clear_id">Clear</label><br />
Change: <input type="file" name="myfile" />
"""
))
def test_html_escaped(self):
"""
A ClearableFileInput should escape name, filename, and URL
when rendering HTML (#15182).
"""
@python_2_unicode_compatible
class StrangeFieldFile(object):
url = "something?chapter=1§=2©=3&lang=en"
def __str__(self):
return '''something<div onclick="alert('oops')">.jpg'''
self.check_html(ClearableFileInput(), 'my<div>file', StrangeFieldFile(), html=(
"""
Currently: <a href="something?chapter=1&sect=2&copy=3&lang=en">
something<div onclick="alert('oops')">.jpg</a>
<input type="checkbox" name="my<div>file-clear" id="my<div>file-clear_id" />
<label for="my<div>file-clear_id">Clear</label><br />
Change: <input type="file" name="my<div>file" />
"""
))
def test_clear_input_renders_only_if_not_required(self):
"""
A ClearableFileInput with is_required=True does not render a clear
checkbox.
"""
widget = ClearableFileInput()
widget.is_required = True
self.check_html(widget, 'myfile', FakeFieldFile(), html=(
"""
Currently: <a href="something">something</a> <br />
Change: <input type="file" name="myfile" />
"""
))
def test_clear_input_renders_only_if_initial(self):
"""
A ClearableFileInput instantiated with no initial value does not render
a clear checkbox.
"""
self.check_html(self.widget, 'myfile', None, html='<input type="file" name="myfile" />')
def test_clear_input_checked_returns_false(self):
"""
ClearableFileInput.value_from_datadict returns False if the clear
checkbox is checked, if not required.
"""
value = self.widget.value_from_datadict(
data={'myfile-clear': True},
files={},
name='myfile',
)
self.assertEqual(value, False)
def test_clear_input_checked_returns_false_only_if_not_required(self):
"""
ClearableFileInput.value_from_datadict never returns False if the field
is required.
"""
widget = ClearableFileInput()
widget.is_required = True
field = SimpleUploadedFile('something.txt', b'content')
value = widget.value_from_datadict(
data={'myfile-clear': True},
files={'myfile': field},
name='myfile',
)
self.assertEqual(value, field)
def test_html_does_not_mask_exceptions(self):
"""
A ClearableFileInput should not mask exceptions produced while
checking that it has a value.
"""
@python_2_unicode_compatible
class FailingURLFieldFile(object):
@property
def url(self):
raise ValueError('Canary')
def __str__(self):
return 'value'
with self.assertRaisesMessage(ValueError, 'Canary'):
self.widget.render('myfile', FailingURLFieldFile())
def test_url_as_property(self):
@python_2_unicode_compatible
class URLFieldFile(object):
@property
def url(self):
return 'https://www.python.org/'
def __str__(self):
return 'value'
html = self.widget.render('myfile', URLFieldFile())
self.assertInHTML('<a href="https://www.python.org/">value</a>', html)
def test_return_false_if_url_does_not_exists(self):
@python_2_unicode_compatible
class NoURLFieldFile(object):
def __str__(self):
return 'value'
html = self.widget.render('myfile', NoURLFieldFile())
self.assertHTMLEqual(html, '<input name="myfile" type="file" />')
| bsd-3-clause |
asridharan/dcos | packages/adminrouter/extra/src/test-harness/modules/mocker/jwt.py | 13 | 4195 | # Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""This module provides a set tools for generating JSON Web Tokens
Attributes:
AUTHTOKEN_LIFETIME_SECONDS (int): default token validity period, measured from
time.time(), expressed in seconds
"""
import logging
import time
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
log = logging.getLogger(__name__)
AUTHTOKEN_LIFETIME_SECONDS = 3600
def load_key(key_path):
"""Load a key from a file
`ascii` encoding is assumed as the key will be either RSA PEM, or base64
encoded shared secret. The contents are stripped.
Args:
key_path (str): path to the file that contains the key
Returns:
The contents of the file in `key_path` path
Raises:
        OSError: problem occurred while loading the file
"""
try:
with open(key_path, 'r', encoding='ascii') as fh:
key_data = fh.read().strip()
except OSError:
log.exception('Cannot read key file `%s`', key_path)
raise
return key_data
def decode_pem_key(key_pem):
"""Convert plaintext PEM key into the format usable for JWT generation
Args:
        key_pem (str): key data in PEM format, presented as a plain string
Returns:
Parsed PEM data
"""
private_key = serialization.load_pem_private_key(
data=key_pem.encode('ascii'),
password=None,
backend=default_backend())
msg = 'Unexpected private key type'
assert isinstance(private_key, rsa.RSAPrivateKey), msg
assert private_key.key_size >= 2048, 'RSA key size too small'
return private_key
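# --- Hedged example (not part of the original module) ------------------------
# A minimal sketch of producing a PEM string that decode_pem_key() accepts,
# reusing the `cryptography` primitives already imported above. The helper
# name `_make_test_rsa_pem` is hypothetical and exists only for illustration.
def _make_test_rsa_pem(key_size=2048):
    """Generate a throwaway RSA private key and return it as a PEM string."""
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=key_size,
        backend=default_backend())
    pem_bytes = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption())
    return pem_bytes.decode('ascii')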
def generate_rs256_jwt(
key_path, uid, exp=None, skip_uid_claim=False, skip_exp_claim=False):
"""Generate a RS256 JSON Web Token
Args:
key_path (str): path to the private key encoded in PEM format, which will be
used for token signing/encoding
uid (str): a value of `uid` JWT claim that should be set in the token
exp (int): a value of `exp` JWT claim that should be set in the token,
by default it's AUTHTOKEN_LIFETIME_SECONDS seconds from now.
        skip_uid_claim (bool): specifies whether the `uid` claim should be present
in the token
skip_exp_claim (bool): specifies whether the `exp` claim should be present
in the token
Returns:
A JSON Web Token string that can be used directly in HTTP headers/cookies/etc...
"""
if exp is None:
exp = time.time() + AUTHTOKEN_LIFETIME_SECONDS
payload = {"uid": uid,
"exp": int(exp)}
if skip_uid_claim:
del payload['uid']
if skip_exp_claim:
del payload['exp']
key_pem = load_key(key_path)
key = decode_pem_key(key_pem)
jwt_token = jwt.encode(payload, key, algorithm='RS256').decode('ascii')
return jwt_token
def generate_hs256_jwt(
key_path, uid, exp=None, skip_uid_claim=False, skip_exp_claim=False):
"""Generate a HS256 JSON Web Token
Args:
key_path (str): path to shared secret, which will be used for token
signing/encoding
uid (str): a value of `uid` JWT claim that should be set in the token
exp (int): a value of `exp` JWT claim that should be set in the token,
by default it's AUTHTOKEN_LIFETIME_SECONDS seconds from now.
skip_uid_claim (bool): specifies whether the `uid` claim should be present
in the token
skip_exp_claim (bool): specifies whether the `exp` claim should be present
in the token
Returns:
A JSON Web Token string that can be used directly in HTTP headers/cookies/etc...
"""
if exp is None:
exp = time.time() + AUTHTOKEN_LIFETIME_SECONDS
payload = {"uid": uid,
"exp": int(exp)}
if skip_uid_claim:
del payload['uid']
if skip_exp_claim:
del payload['exp']
key = load_key(key_path)
jwt_token = jwt.encode(payload, key, algorithm='HS256').decode('ascii')
return jwt_token
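# --- Hedged usage sketch ------------------------------------------------------
# The key paths below are hypothetical placeholders; any readable PEM file or
# shared-secret file would do. `exp` defaults to AUTHTOKEN_LIFETIME_SECONDS
# from now when omitted.
#
#   rs_token = generate_rs256_jwt('/path/to/private-key.pem', uid='bootstrap')
#   hs_token = generate_hs256_jwt('/path/to/shared-secret', uid='bootstrap',
#                                 exp=time.time() + 60)
#   headers = {'Authorization': 'token=' + rs_token}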
| apache-2.0 |
quinot/ansible | test/runner/lib/core_ci.py | 12 | 17839 | """Access Ansible Core CI remote services."""
from __future__ import absolute_import, print_function
import json
import os
import traceback
import uuid
import errno
import time
import shutil
from lib.http import (
HttpClient,
HttpResponse,
HttpError,
)
from lib.util import (
ApplicationError,
run_command,
make_dirs,
display,
is_shippable,
)
from lib.config import (
EnvironmentConfig,
)
AWS_ENDPOINTS = {
'us-east-1': 'https://14blg63h2i.execute-api.us-east-1.amazonaws.com',
'us-east-2': 'https://g5xynwbk96.execute-api.us-east-2.amazonaws.com',
}
class AnsibleCoreCI(object):
"""Client for Ansible Core CI services."""
def __init__(self, args, platform, version, stage='prod', persist=True, load=True, name=None, provider=None):
"""
:type args: EnvironmentConfig
:type platform: str
:type version: str
:type stage: str
:type persist: bool
:type load: bool
        :type name: str
        :type provider: str | None
"""
self.args = args
self.platform = platform
self.version = version
self.stage = stage
self.client = HttpClient(args)
self.connection = None
self.instance_id = None
self.endpoint = None
self.max_threshold = 1
self.name = name if name else '%s-%s' % (self.platform, self.version)
self.ci_key = os.path.expanduser('~/.ansible-core-ci.key')
self.resource = 'jobs'
# Assign each supported platform to one provider.
# This is used to determine the provider from the platform when no provider is specified.
providers = dict(
aws=(
'aws',
'windows',
'freebsd',
'vyos',
'junos',
'ios',
),
azure=(
'azure',
'rhel',
),
parallels=(
'osx',
),
)
if provider:
# override default provider selection (not all combinations are valid)
self.provider = provider
else:
for candidate in providers:
if platform in providers[candidate]:
# assign default provider based on platform
self.provider = candidate
break
if self.provider in ('aws', 'azure'):
if self.provider != 'aws':
self.resource = self.provider
if args.remote_aws_region:
# permit command-line override of region selection
region = args.remote_aws_region
# use a dedicated CI key when overriding the region selection
self.ci_key += '.%s' % args.remote_aws_region
elif is_shippable():
# split Shippable jobs across multiple regions to maximize use of launch credits
if self.platform == 'windows':
region = 'us-east-2'
else:
region = 'us-east-1'
else:
# send all non-Shippable jobs to us-east-1 to reduce api key maintenance
region = 'us-east-1'
            self.endpoints = AWS_ENDPOINTS[region],  # trailing comma: a one-element tuple
if self.platform == 'windows':
self.ssh_key = None
self.port = 5986
else:
self.ssh_key = SshKey(args)
self.port = 22
elif self.provider == 'parallels':
self.endpoints = self._get_parallels_endpoints()
self.max_threshold = 6
self.ssh_key = SshKey(args)
self.port = None
else:
raise ApplicationError('Unsupported platform: %s' % platform)
self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage))
if persist and load and self._load():
try:
display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.connection = self.get(always_raise_on=[404])
display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
except HttpError as ex:
if ex.status != 404:
raise
self._clear()
display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
self.instance_id = None
self.endpoint = None
elif not persist:
self.instance_id = None
self.endpoint = None
self._clear()
if self.instance_id:
self.started = True
else:
self.started = False
self.instance_id = str(uuid.uuid4())
self.endpoint = None
def _get_parallels_endpoints(self):
"""
:rtype: tuple[str]
"""
client = HttpClient(self.args, always=True)
display.info('Getting available endpoints...', verbosity=1)
sleep = 3
for _ in range(1, 10):
response = client.get('https://s3.amazonaws.com/ansible-ci-files/ansible-test/parallels-endpoints.txt')
if response.status_code == 200:
endpoints = tuple(response.response.splitlines())
display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
return endpoints
display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
time.sleep(sleep)
raise ApplicationError('Unable to get available endpoints.')
def start(self):
"""Start instance."""
if self.started:
display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
if is_shippable():
return self.start_shippable()
return self.start_remote()
def start_remote(self):
"""Start instance for remote development/testing."""
with open(self.ci_key, 'r') as key_fd:
auth_key = key_fd.read().strip()
return self._start(dict(
remote=dict(
key=auth_key,
nonce=None,
),
))
def start_shippable(self):
"""Start instance on Shippable."""
return self._start(dict(
shippable=dict(
run_id=os.environ['SHIPPABLE_BUILD_ID'],
job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
),
))
def stop(self):
"""Stop instance."""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
response = self.client.delete(self._uri)
if response.status_code == 404:
self._clear()
display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
if response.status_code == 200:
self._clear()
display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return
raise self._create_http_error(response)
def get(self, tries=3, sleep=15, always_raise_on=None):
"""
Get instance connection information.
:type tries: int
:type sleep: int
:type always_raise_on: list[int] | None
:rtype: InstanceConnection
"""
if not self.started:
display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
verbosity=1)
return None
if not always_raise_on:
always_raise_on = []
if self.connection and self.connection.running:
return self.connection
while True:
tries -= 1
response = self.client.get(self._uri)
if response.status_code == 200:
break
error = self._create_http_error(response)
if not tries or response.status_code in always_raise_on:
raise error
display.warning('%s. Trying again after %d seconds.' % (error, sleep))
time.sleep(sleep)
if self.args.explain:
self.connection = InstanceConnection(
running=True,
hostname='cloud.example.com',
port=self.port or 12345,
username='username',
password='password' if self.platform == 'windows' else None,
)
else:
response_json = response.json()
status = response_json['status']
con = response_json['connection']
self.connection = InstanceConnection(
running=status == 'running',
hostname=con['hostname'],
port=int(con.get('port', self.port)),
username=con['username'],
password=con.get('password'),
)
status = 'running' if self.connection.running else 'starting'
display.info('Status update: %s/%s on instance %s is %s.' %
(self.platform, self.version, self.instance_id, status),
verbosity=1)
return self.connection
def wait(self):
"""Wait for the instance to become ready."""
for _ in range(1, 90):
if self.get().running:
return
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.platform, self.version, self.instance_id))
@property
def _uri(self):
return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.resource, self.instance_id)
def _start(self, auth):
"""Start instance."""
display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
if self.platform == 'windows':
with open('examples/scripts/ConfigureRemotingForAnsible.ps1', 'rb') as winrm_config_fd:
winrm_config = winrm_config_fd.read().decode('utf-8')
else:
winrm_config = None
data = dict(
config=dict(
platform=self.platform,
version=self.version,
public_key=self.ssh_key.pub_contents if self.ssh_key else None,
query=False,
winrm_config=winrm_config,
)
)
data.update(dict(auth=auth))
headers = {
'Content-Type': 'application/json',
}
response = self._start_try_endpoints(data, headers)
self.started = True
self._save()
display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
if self.args.explain:
return {}
return response.json()
def _start_try_endpoints(self, data, headers):
"""
:type data: dict[str, any]
:type headers: dict[str, str]
:rtype: HttpResponse
"""
threshold = 1
while threshold <= self.max_threshold:
for self.endpoint in self.endpoints:
try:
return self._start_at_threshold(data, headers, threshold)
except CoreHttpError as ex:
if ex.status == 503:
display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
continue
display.error(ex.remote_message)
except HttpError as ex:
display.error(u'%s' % ex)
time.sleep(3)
threshold += 1
raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
def _start_at_threshold(self, data, headers, threshold):
"""
:type data: dict[str, any]
:type headers: dict[str, str]
:type threshold: int
:rtype: HttpResponse | None
"""
tries = 3
sleep = 15
data['threshold'] = threshold
display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)
while True:
tries -= 1
response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
if response.status_code == 200:
return response
error = self._create_http_error(response)
if response.status_code == 503:
raise error
if not tries:
raise error
display.warning('%s. Trying again after %d seconds.' % (error, sleep))
time.sleep(sleep)
def _clear(self):
"""Clear instance information."""
try:
self.connection = None
os.remove(self.path)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def _load(self):
"""Load instance information."""
try:
with open(self.path, 'r') as instance_fd:
data = instance_fd.read()
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
return False
if not data.startswith('{'):
return False # legacy format
config = json.loads(data)
return self.load(config)
def load(self, config):
"""
:type config: dict[str, str]
:rtype: bool
"""
self.instance_id = config['instance_id']
self.endpoint = config['endpoint']
self.started = True
return True
def _save(self):
"""Save instance information."""
if self.args.explain:
return
config = self.save()
make_dirs(os.path.dirname(self.path))
with open(self.path, 'w') as instance_fd:
instance_fd.write(json.dumps(config, indent=4, sort_keys=True))
def save(self):
"""
:rtype: dict[str, str]
"""
return dict(
platform_version='%s/%s' % (self.platform, self.version),
instance_id=self.instance_id,
endpoint=self.endpoint,
)
@staticmethod
def _create_http_error(response):
"""
:type response: HttpResponse
:rtype: ApplicationError
"""
response_json = response.json()
stack_trace = ''
if 'message' in response_json:
message = response_json['message']
elif 'errorMessage' in response_json:
message = response_json['errorMessage'].strip()
if 'stackTrace' in response_json:
trace = '\n'.join([x.rstrip() for x in traceback.format_list(response_json['stackTrace'])])
stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
else:
message = str(response_json)
return CoreHttpError(response.status_code, message, stack_trace)
class CoreHttpError(HttpError):
"""HTTP response as an error."""
def __init__(self, status, remote_message, remote_stack_trace):
"""
:type status: int
:type remote_message: str
:type remote_stack_trace: str
"""
super(CoreHttpError, self).__init__(status, '%s%s' % (remote_message, remote_stack_trace))
self.remote_message = remote_message
self.remote_stack_trace = remote_stack_trace
class SshKey(object):
"""Container for SSH key used to connect to remote instances."""
KEY_NAME = 'id_rsa'
PUB_NAME = 'id_rsa.pub'
def __init__(self, args):
"""
:type args: EnvironmentConfig
"""
cache_dir = 'test/cache'
self.key = os.path.join(cache_dir, self.KEY_NAME)
self.pub = os.path.join(cache_dir, self.PUB_NAME)
if not os.path.isfile(self.key) or not os.path.isfile(self.pub):
base_dir = os.path.expanduser('~/.ansible/test/')
key = os.path.join(base_dir, self.KEY_NAME)
pub = os.path.join(base_dir, self.PUB_NAME)
if not args.explain:
make_dirs(base_dir)
if not os.path.isfile(key) or not os.path.isfile(pub):
run_command(args, ['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-f', key])
if not args.explain:
shutil.copy2(key, self.key)
shutil.copy2(pub, self.pub)
if args.explain:
self.pub_contents = None
else:
with open(self.pub, 'r') as pub_fd:
self.pub_contents = pub_fd.read().strip()
class InstanceConnection(object):
"""Container for remote instance status and connection details."""
def __init__(self, running, hostname, port, username, password):
"""
:type running: bool
:type hostname: str
:type port: int
:type username: str
:type password: str | None
"""
self.running = running
self.hostname = hostname
self.port = port
self.username = username
self.password = password
def __str__(self):
if self.password:
return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
return '%s:%s [%s]' % (self.hostname, self.port, self.username)
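# --- Hedged usage sketch ------------------------------------------------------
# How a caller might drive AnsibleCoreCI end to end. `args` stands in for a
# real EnvironmentConfig and the platform/version pair is illustrative only.
#
#   core_ci = AnsibleCoreCI(args, 'freebsd', '11.1')
#   core_ci.start()   # no-op when a persisted instance is already running
#   core_ci.wait()    # polls get() until the instance reports 'running'
#   conn = core_ci.get()
#   print('ssh %s@%s -p %d' % (conn.username, conn.hostname, conn.port))
#   core_ci.stop()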
| gpl-3.0 |
kushalbhola/MyStuff | venv/Lib/site-packages/setuptools/command/install_lib.py | 431 | 3840 | import os
import imp
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
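    # Hedged illustration (paths are made up): with install_dir set to
    # '/usr/lib/python3/dist-packages', _exclude_pkg_path('foo.bar',
    # '__init__.py') returns
    # '/usr/lib/python3/dist-packages/foo/bar/__init__.py'.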
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(imp, 'get_tag'):
return
base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
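    # Hedged illustration: on an interpreter whose imp.get_tag() returns
    # 'cpython-37' (the tag is interpreter-specific), the generator yields
    # '__init__.py', '__init__.pyc', '__init__.pyo', then the __pycache__
    # variants '__pycache__/__init__.cpython-37.pyc', '.pyo', '.opt-1.pyc'
    # and '.opt-2.pyc'.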
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| apache-2.0 |
gbalme/OnlineGamingCoin | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
rosswhitfield/mantid | Testing/SystemTests/tests/framework/ILLPowderD2BEfficiencyTest.py | 3 | 2016 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import systemtesting
from mantid.simpleapi import PowderILLEfficiency, GroupWorkspaces
from mantid import config, mtd
import numpy as np
class ILLPowderD2BEfficiencyTest(systemtesting.MantidSystemTest):
def __init__(self):
super(ILLPowderD2BEfficiencyTest, self).__init__()
self.setUp()
def setUp(self):
config['default.facility'] = 'ILL'
config['default.instrument'] = 'D2B'
config.appendDataSearchSubDir('ILL/D2B/')
def requiredFiles(self):
return ['532008.nxs', '532009.nxs']
def cleanup(self):
mtd.clear()
def testAutoMasking(self):
PowderILLEfficiency(CalibrationRun='532008,532009',
DerivationMethod='GlobalSummedReference2D',
ExcludedRange=[-5,10],
OutputWorkspace='masked',
MaskCriterion=[0.3,3])
data = mtd['masked'].extractY().flatten()
data = data[np.nonzero(data)]
coeff_max = data.max()
self.assertLessEqual(coeff_max, 3.)
coeff_min = data.min()
self.assertGreaterEqual(coeff_min, 0.3)
def runTest(self):
self.testAutoMasking()
PowderILLEfficiency(CalibrationRun='532008,532009',
DerivationMethod='GlobalSummedReference2D',
ExcludedRange=[-5,10],
OutputWorkspace='calib',
OutputResponseWorkspace='response')
GroupWorkspaces(InputWorkspaces=['calib','response'], OutputWorkspace='group')
def validate(self):
self.tolerance = 0.01
return ['group', 'D2B_DetEffCorr_Ref.nxs']
| gpl-3.0 |
smallredbean/pgctl | tests/spec/fuser.py | 1 | 1055 | # pylint:disable=no-self-use
from __future__ import absolute_import
from __future__ import unicode_literals
from testfixtures import ShouldRaise
from testing.subprocess import assert_command
def assert_does_not_find(path):
assert_command(
('pgctl-fuser', path),
'',
'',
0,
)
def it_can_find_the_user(tmpdir):
testfile = tmpdir.join('testfile').ensure()
assert_does_not_find(str(testfile))
with testfile.open():
with ShouldRaise(AssertionError):
assert_does_not_find(str(testfile))
from os import getpid
assert_command(
('pgctl-fuser', str(testfile)),
'%i\n' % getpid(),
'',
0,
close_fds=True,
)
assert_does_not_find(str(testfile))
def it_shows_help_given_no_arguments():
from pgctl.fuser import __doc__
assert_command(
('pgctl-fuser',),
'',
__doc__ + '\n',
1,
)
def it_properly_ignores_nosuchfile():
assert_does_not_find('nosuchfile')
| mit |
xuru/pyvisdk | pyvisdk/do/vm_suspending_event.py | 1 | 1152 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmSuspendingEvent(vim, *args, **kwargs):
'''This event records a virtual machine suspending.'''
obj = vim.client.factory.create('ns0:VmSuspendingEvent')
# do some validation checking...
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
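# --- Hedged usage sketch ------------------------------------------------------
# `vim` must be a connected pyvisdk instance; the claim values below are
# placeholders chosen only to satisfy the five required fields.
#
#   event = VmSuspendingEvent(vim, template=False, chainId=1,
#                             createdTime=now, key=1, userName='root')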
| mit |
cakesultancm11/android_kernel_samsung_msm8660-common | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
divya-csekar/flask-microblog-server | flask/Lib/site-packages/sqlalchemy/ext/orderinglist.py | 22 | 13816 | # ext/orderinglist.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
      possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from ..orm.collections import collection, collection_adapter
from .. import util
__all__ = ['ordering_list']
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = 'count_from_%i' % start
except TypeError:
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop('count_from', None)
if kw.get('ordering_func', None) is None and count_from is not None:
if count_from == 0:
kw['ordering_func'] = count_from_0
elif count_from == 1:
kw['ordering_func'] = count_from_1
else:
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
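# Hedged illustration: _unsugar_count_from(count_from=1) returns
# {'ordering_func': count_from_1}; with an explicit ordering_func supplied,
# the function passes it through untouched and simply drops count_from.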
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`.relationship` function.
"""
def __init__(self, ordering_attr=None, ordering_func=None,
reorder_on_append=False):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
previously ordered instances or when doing some housekeeping after
manual sql operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
| bsd-3-clause |
kubeflow/xgboost-operator | config/samples/smoke-dist/tracker.py | 2 | 16512 | """
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
--------------------------
This was taken from
https://github.com/dmlc/dmlc-core/blob/master/tracker/dmlc_tracker/tracker.py
See LICENSE here
https://github.com/dmlc/dmlc-core/blob/master/LICENSE
No code modified or added except for this explanatory comment.
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (
magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
            # all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
        logging.info('start listening on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
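    # Hedged worked example (not in the original source): with nslave=4 the
    # zero-based ranks form a binary tree rooted at rank 0:
    #
    #   get_neighbor(0, 4) == [1, 2]   # children 1 and 2
    #   get_neighbor(1, 4) == [0, 3]   # parent 0, child 3
    #   get_neighbor(2, 4) == [0]      # parent 0 only
    #   get_neighbor(3, 4) == [1]      # parent 1 only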
def slave_envs(self):
"""
        get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
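    # Hedged illustration: for the nslave=4 tree above, find_share_ring
    # typically yields [0, 1, 3, 2], i.e. the ring 0 -> 1 -> 3 -> 2 -> 0,
    # so ring_map == {0: (2, 1), 1: (0, 3), 3: (1, 2), 2: (3, 0)}. The
    # exact order depends on set iteration order, so treat this as one
    # possible outcome rather than a guaranteed one.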
def get_link_map(self, nslave):
"""
        get the link map; this is a bit hacky and calls for a better
        algorithm to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0: 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
        # set of nodes that finished the job
        shutdown = {}
        # set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that is pending to be assigned rank
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Received %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that are pending startup
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map,
ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
                        logging.debug('Received %s signal from %s; '
                                      'assign rank %d', s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started',
nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Received %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info("worker(ip_address=%s) connected!" % get_some_ip(s_addr[0]))
        logging.info('@tracker All nodes finished the job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
while self.thread.isAlive():
self.thread.join(100)
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(
target=(lambda: subprocess.check_call(self.cmd, env=env,
shell=True)), args=())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
if self.cmd is not None:
while self.thread.isAlive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on '
                            'hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER': nworker,
'DMLC_NUM_SERVER': nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
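# Hedged sketch of a `fun_submit` callback (the launcher below is purely
# illustrative; only the (nworker, nserver, envs) signature is dictated by
# submit() above). Each launched process must see the DMLC_* variables so it
# can register with the tracker:
#
#   def fun_submit(nworker, nserver, envs):
#       import subprocess
#       for _ in range(nworker):
#           env = os.environ.copy()
#           env.update({k: str(v) for k, v in envs.items()})
#           subprocess.Popen(['python', 'worker.py'], env=env)
#
#   submit(nworker=2, nserver=0, fun_submit=fun_submit)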
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER': args.num_workers,
'DMLC_NUM_SERVER': args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip),
nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers', required=True, type=int,
help='Number of worker processes to be launched.')
parser.add_argument('--num-servers', default=0, type=int,
help='Number of server processes to be launched. Only '
'used in PS jobs.')
parser.add_argument('--host-ip', default=None, type=str,
help=('Host IP address; this is only needed ' +
'if the host IP cannot be automatically guessed.'
))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone "
"mode.")
if __name__ == "__main__":
main() | apache-2.0 |
m-kuhn/QGIS | python/plugins/processing/algs/qgis/SelectByExpression.py | 12 | 3871 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByExpression.py
---------------------
Date : July 2014
Copyright : (C) 2014 by Michael Douchin
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Douchin'
__date__ = 'July 2014'
__copyright__ = '(C) 2014, Michael Douchin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsExpression,
QgsProcessing,
QgsVectorLayer,
QgsProcessingAlgorithm,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterExpression,
QgsProcessingParameterEnum,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByExpression(QgisAlgorithm):
INPUT = 'INPUT'
EXPRESSION = 'EXPRESSION'
OUTPUT = 'OUTPUT'
METHOD = 'METHOD'
def group(self):
return self.tr('Vector selection')
def groupId(self):
return 'vectorselection'
def __init__(self):
super().__init__()
def flags(self):
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading
def initAlgorithm(self, config=None):
self.methods = [self.tr('creating new selection'),
self.tr('adding to current selection'),
self.tr('removing from current selection'),
self.tr('selecting within current selection')]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer'), types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterExpression(self.EXPRESSION,
self.tr('Expression'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.METHOD,
self.tr('Modify current selection by'), self.methods, 0))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyexpression'
def displayName(self):
return self.tr('Select by expression')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
method = self.parameterAsEnum(parameters, self.METHOD, context)
if method == 0:
behavior = QgsVectorLayer.SetSelection
elif method == 1:
behavior = QgsVectorLayer.AddToSelection
elif method == 2:
behavior = QgsVectorLayer.RemoveFromSelection
elif method == 3:
behavior = QgsVectorLayer.IntersectSelection
expression = self.parameterAsString(parameters, self.EXPRESSION, context)
qExp = QgsExpression(expression)
if qExp.hasParserError():
raise QgsProcessingException(qExp.parserErrorString())
layer.selectByExpression(expression, behavior)
return {self.OUTPUT: parameters[self.INPUT]}
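# Illustrative invocation from the QGIS Python console (comments only);
# 'my_layer' is a placeholder for a loaded vector layer, the parameter
# keys match the definitions in initAlgorithm() above, and the
# 'qgis:' provider prefix is assumed from this algorithm's location:
#   import processing
#   processing.run('qgis:selectbyexpression',
#                  {'INPUT': my_layer,
#                   'EXPRESSION': '"population" > 1000',
#                   'METHOD': 0})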
| gpl-2.0 |
Walt280/cosmos | code/graph-algorithms/breadth_first_search/breadth_first_search.py | 5 | 1799 | """ Part of Cosmos by OpenGenus Foundation"""
import collections
"""
Wrapper function for the print function.
Used as the default visitFunc for bfs
"""
def visitPrint(i):
print(i)
"""
A class representing an undirected graph of nodes.
An edge can be added between two nodes by calling addEdge
*This class assumes all edge weights are equal
"""
class Graph:
def __init__(self):
self.adjList = collections.defaultdict(set)
def addEdge(self, node1, node2):
self.adjList[node1].add(node2)
self.adjList[node2].add(node1)
"""
Given a 'start' node and a 'graph', call visitFunc
sequentially on the current node, and then its children
and so forth.
When visiting each node, mark it as visited by adding it to the hashmap.
Then queue up all of its children to be visited next.
"""
def bfs(start, graph, visitFunc=visitPrint):
visited = collections.defaultdict(bool)
queue = collections.deque()
queue.append(start)
while queue:
current = queue.popleft()
if not visited[current]:
visited[current] = True
visitFunc(current)
for neighbor in graph.adjList[current]:
queue.append(neighbor)
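# Illustrative variant (not part of the original file): collect the
# visit order into a list by passing a closure as visitFunc.
def bfs_order(start, graph):
    order = []
    bfs(start, graph, visitFunc=order.append)
    return order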
# Testing the breadth first search implementation
if __name__ == "__main__":
# Testing on this tree
# 1
# / \
# / \
# 2 3
# / \ / \
# 4 5 6 7
g = Graph()
g.addEdge(1, 2)
g.addEdge(1, 3)
g.addEdge(2, 4)
g.addEdge(2, 5)
g.addEdge(3, 6)
g.addEdge(3, 7)
print("Test 1:")
bfs(1, g)
print("\nTest2:")
bfs(2, g)
"""Output:
Test 1:
1
2
3
4
5
6
7
Test2:
2
1
4
5
3
6
7
"""
| gpl-3.0 |
evanma92/routeh | flask/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | 21 | 109634 | # sql/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import re
from . import schema, sqltypes, operators, functions, \
util as sql_util, visitors, elements, selectable, base
from .. import util, exc
import decimal
import itertools
import operator
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
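# Illustrative check (not part of SQLAlchemy) of how the two patterns
# above cooperate: BIND_PARAMS finds ":name" placeholders, while the
# escaped form "\:name" is skipped and later un-escaped through
# BIND_PARAMS_ESC (see visit_textclause below).
def _example_bind_param_regexes():
    text = r"x = :x AND y = \:not_a_param"
    assert BIND_PARAMS.findall(text) == ['x']
    assert BIND_PARAMS_ESC.sub(lambda m: m.group(1), text) == \
        "x = :x AND y = :not_a_param"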
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
REQUIRED = util.symbol('REQUIRED', """
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`.Connection.execute`.
This symbol is typically used when a :func:`.expression.insert`
or :func:`.expression.update` statement is compiled without parameter
values present.
""")
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.concat_op: ' || ',
operators.match_op: ' MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER'
}
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
def __init__(self, dialect, statement, bind=None,
compile_kwargs=util.immutabledict()):
"""Construct a new ``Compiled`` object.
:param dialect: ``Dialect`` to compile against.
:param statement: ``ClauseElement`` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
.. versionadded:: 0.8
"""
self.dialect = dialect
self.bind = bind
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_compiled(self, multiparams, params)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
class TypeCompiler(object):
"""Produces DDL specification for TypeEngine objects."""
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_):
return type_._compiler_dispatch(self)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
class SQLCompiler(Compiled):
"""Default implementation of Compiled.
Compiles ClauseElements into SQL strings. Uses a similar visit
paradigm as visitors.ClauseVisitor but implements its own traversal.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to False if the target
driver/DB enforces this
"""
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new ``DefaultCompiler`` object.
dialect
Dialect to be used
statement
ClauseElement to be compiled
column_keys
a list of column names to be compiled into an INSERT or UPDATE
statement.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self.result_map = {}
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
# an IdentifierPreparer that formats the quoting of identifiers
self.preparer = dialect.identifier_preparer
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if self.positional and dialect.paramstyle == 'numeric':
self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
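# Illustrative effect (comments only): with paramstyle 'numeric', each
# bind is first rendered as ':[_POSITION]' (see BIND_TEMPLATES above)
# and the pass above rewrites them left-to-right into ':1', ':2', ...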
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect))
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam, name in self.bind_names.items():
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
else:
pd[name] = bindparam.effective_value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
pd[self.bind_names[bindparam]] = bindparam.effective_value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (within_columns_clause and not
within_label_clause)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(
self, within_columns_clause=True,
within_label_clause=True, **kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw)
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
if table.schema:
schema_prefix = self.preparer.quote_schema(table.schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kwargs):
return self.dialect.type_compiler.process(typeclause.type)
def post_process_text(self, text):
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam,
self.post_process_text(textclause.text))
)
def visit_text_as_from(self, taf, iswrapper=False,
compound_index=0, force_result_map=False,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = force_result_map or (
compound_index == 0 and (
toplevel or
entry['iswrapper']
)
)
if populate_result_map:
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, order_by_select=None, **kw):
if order_by_select is not None:
return self._order_by_clauselist(
clauselist, order_by_select, **kw)
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def _order_by_clauselist(self, clauselist, order_by_select, **kw):
# look through raw columns collection for labels.
# note that it's OK we aren't expanding tables and other selectables
# here; we can only add a label in the ORDER BY for an individual
# label expression in the columns clause.
raw_col = set(l._order_by_label_element.name
for l in order_by_select._raw_columns
if l._order_by_label_element is not None)
return ", ".join(
s for s in
(
c._compiler_dispatch(
self,
render_label_as_label=c._order_by_label_element if
c._order_by_label_element is not None and
c._order_by_label_element.name in raw_col
else None,
**kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'iswrapper': toplevel,
'asfrom_froms': entry['asfrom_froms']
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit is not None or cs._offset is not None) and \
self.limit_clause(cs) or ""
if self.ctes and \
compound_index == 0 and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = getattr(self, "visit_%s_unary_operator" %
unary.operator.__name__, None)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = getattr(self, "visit_%s_unary_modifier" %
unary.modifier.__name__, None)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_binary(self, binary, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator = binary.operator
disp = getattr(self, "visit_%s_binary" % operator.__name__, None)
if disp:
return disp(binary, operator, **kw)
else:
try:
opstring = OPERATORS[operator]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(self, binary, opstring, **kw):
return binary.left._compiler_dispatch(self, **kw) + \
opstring + \
binary.right._compiler_dispatch(self, **kw)
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
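# Illustrative renderings (comments only) produced by the visitors above
# on the default dialect; ':x_1' is the auto-generated bind name:
#   column('x').contains('a')   -> x LIKE '%' || :x_1 || '%'
#   column('x').startswith('a') -> x LIKE :x_1 || '%'
#   column('x').endswith('a')   -> x LIKE '%' || :x_1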
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')." %
(bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
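# Illustrative note (comments only): anon_map keys have the form
# "<id> <derived name>"; the first lookup for a given derived name
# produces e.g. 'anon_1', the next distinct one 'anon_2', keeping
# generated labels unique within a single compilation.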
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte, **kwargs)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier(
"alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
if self.positional:
kwargs['positional_names'] = self.cte_positional[cte] = []
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += " AS " + cte_name
else:
return self.preparer.format_alias(cte, cte_name)
return text
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
" AS " + \
self.preparer.format_alias(alias, alias_name)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def _add_to_result_map(self, keyname, name, objects, type_):
if not self.dialect.case_sensitive:
keyname = keyname.lower()
if keyname in self.result_map:
# conflicting keyname: just double up the list
# of objects. this will cause an "ambiguous name"
# error if the result set later attempts to
# access this key.
e_name, e_obj, e_type = self.result_map[keyname]
self.result_map[keyname] = e_name, e_obj + objects, e_type
else:
self.result_map[keyname] = name, objects, type_
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
objects + (column,), type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif not isinstance(column,
(elements.UnaryExpression, elements.TextClause)) \
and (not hasattr(column, 'name') or
isinstance(column, functions.Function)):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(i.e. sqlite the main one).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == \
'select_container'
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
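# Illustrative rewrite (comments only): given "a JOIN (b JOIN c ON ...)
# ON ...", the visitor above replaces the parenthesized right side with
# "(SELECT ... FROM b JOIN c ON ...) AS anon_1" and records the old ->
# new column mapping in column_translate so the enclosing statement
# keeps targeting the same columns.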
def _transform_result_map_for_nested_joins(
self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
for key, (name, objs, typ) in list(self.result_map.items()):
objs = tuple([d.get(col, col) for col in objs])
self.result_map[key] = (name, objs, typ)
_default_stack_entry = util.immutabledict([
('iswrapper', False),
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
iswrapper=False, fromhints=None,
compound_index=0,
force_result_map=False,
nested_join_translation=False,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
iswrapper=iswrapper, fromhints=fromhints,
compound_index=compound_index,
force_result_map=force_result_map,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = force_result_map or (
compound_index == 0 and (
toplevel or
entry['iswrapper']
)
)
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'iswrapper': iswrapper,
'correlate_froms': all_correlate_froms
}
self.stack.append(new_entry)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
text = "SELECT " # we're off to a good start !
if select._hints:
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
if hint_text:
text += hint_text + " "
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
if self.dialect.supports_simple_order_by_label:
order_by_select = select
else:
order_by_select = None
text += self.order_by_clause(
select, order_by_select=order_by_select, **kwargs)
if select._limit is not None or select._offset is not None:
text += self.limit_clause(select)
if select._for_update_arg is not None:
text += self.for_update_clause(select)
if self.ctes and \
compound_index == 0 and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select):
text = ""
if select._limit is not None:
text += "\n LIMIT " + self.process(elements.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(elements.literal(select._offset))
return text
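# Illustrative output (comments only) of limit_clause() on the generic
# dialect; the values render as binds via elements.literal():
#   .limit(5).offset(10) -> "LIMIT :param_1 OFFSET :param_2"
#   .offset(10)          -> "LIMIT -1 OFFSET :param_1"
# the LIMIT -1 fallback serves databases whose syntax requires a LIMIT
# whenever an OFFSET is present.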
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def visit_insert(self, insert_stmt, **kw):
self.stack.append(
{'correlate_froms': set(),
"iswrapper": False,
"asfrom_froms": set(),
"selectable": insert_stmt})
self.isinsert = True
colparams = self._get_colparams(insert_stmt, **kw)
if not colparams and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
colparams_single = colparams[0]
else:
colparams_single = colparams
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
insert_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if insert_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
insert_stmt.table,
dialect_hints[insert_stmt.table],
True
)
text += table_text
if colparams_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in colparams_single])
if self.returning or insert_stmt._returning:
self.returning = self.returning or insert_stmt._returning
returning_clause = self.returning_clause(
insert_stmt, self.returning)
if self.returning_precedes_values:
text += " " + returning_clause
if insert_stmt.select is not None:
text += " %s" % self.process(insert_stmt.select, **kw)
elif not colparams and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in colparam_set)
)
for colparam_set in colparams
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in colparams])
if self.returning and not self.returning_precedes_values:
text += " " + returning_clause
self.stack.pop(-1)
return text
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
return from_table._compiler_dispatch(self, asfrom=True,
iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def visit_update(self, update_stmt, **kw):
self.stack.append(
{'correlate_froms': set([update_stmt.table]),
"iswrapper": False,
"asfrom_froms": set([update_stmt.table])})
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
colparams = self._get_colparams(update_stmt, **kw)
if update_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
update_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if update_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
update_stmt.table,
dialect_hints[update_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in colparams
)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
self.stack.pop(-1)
return text
def _create_crud_bind_param(self, col, value, required=False, name=None):
if name is None:
name = col.key
bindparam = elements.BindParameter(name, value,
type_=col.type, required=required)
bindparam._is_crud = True
return bindparam._compiler_dispatch(self)
@util.memoized_property
def _key_getters_for_crud_column(self):
if self.isupdate and self.statement._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(self.statement._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, 'table') and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
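    # Editorial example (not in the original source): in a multi-table UPDATE
    # whose extra FROM is a table named "t2", a t2 column "x" is keyed as
    # ('t2', 'x') and its bind parameter is named "t2_x", while columns of
    # the statement's main table keep their plain .key.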
def _get_colparams(self, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
self.postfetch = []
self.prefetch = []
self.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if self.column_keys is None and stmt.parameters is None:
return [
(c, self._create_crud_bind_param(c,
None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
_column_as_key, _getattr_col_key, _col_bind_name = \
self._key_getters_for_crud_column
# if we have statement parameters - set defaults in the
# compiled params
if self.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), REQUIRED)
for key in self.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = self.process(
elements.BindParameter(None, v, type_=k.type),
**kw)
else:
v = self.process(v.self_group(), **kw)
values.append((k, v))
need_pks = self.isinsert and \
not self.inline and \
not stmt._returning
implicit_returning = need_pks and \
self.dialect.implicit_returning and \
stmt.table.implicit_returning
if self.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
elif self.isupdate:
implicit_return_defaults = (self.dialect.implicit_returning and
stmt.table.implicit_returning and
stmt._return_defaults)
else:
implicit_return_defaults = False
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if self.isupdate and stmt._extra_froms and stmt_parameters:
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = self._create_crud_bind_param(
c, value, required=value is REQUIRED,
name=_col_bind_name(c))
else:
self.postfetch.append(c)
value = self.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually
# to be updated - process onupdate and
# server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif (c.onupdate is not None and not
c.onupdate.is_sequence):
if c.onupdate.is_clause_element:
values.append(
(c, self.process(
c.onupdate.arg.self_group(),
**kw)
)
)
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(
c, None, name=_col_bind_name(c)
)
)
)
self.prefetch.append(c)
elif c.server_onupdate is not None:
self.postfetch.append(c)
if self.isinsert and stmt.select_names:
# for an insert from select, we can only use names that
# are given, so only select for those names.
cols = (stmt.table.c[_column_as_key(name)]
for name in stmt.select_names)
else:
# iterate through all table columns to maintain
# ordering, even for those cols that aren't included
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
value = parameters.pop(col_key)
if elements._is_literal(value):
value = self._create_crud_bind_param(
c, value, required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_0" % _col_bind_name(c)
)
else:
if isinstance(value, elements.BindParameter) and \
value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
self.returning.append(c)
value = self.process(value.self_group(), **kw)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
value = self.process(value.self_group(), **kw)
else:
self.postfetch.append(c)
value = self.process(value.self_group(), **kw)
values.append((c, value))
elif self.isinsert:
if c.primary_key and \
need_pks and \
(
implicit_returning or
not postfetch_lastrowid or
c is not stmt.table._autoincrement_column
):
if implicit_returning:
if c.default is not None:
if c.default.is_sequence:
if self.dialect.supports_sequences and \
(not c.default.optional or
not self.dialect.sequences_optional):
proc = self.process(c.default, **kw)
values.append((c, proc))
self.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, self.process(
c.default.arg.self_group(), **kw))
)
self.returning.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
else:
self.returning.append(c)
else:
if (
(c.default is not None and
(not c.default.is_sequence or
self.dialect.supports_sequences)) or
c is stmt.table._autoincrement_column and
(self.dialect.supports_sequences or
self.dialect.
preexecute_autoincrement_sequences)
):
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.default is not None:
if c.default.is_sequence:
if self.dialect.supports_sequences and \
(not c.default.optional or
not self.dialect.sequences_optional):
proc = self.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
self.postfetch.append(c)
elif c.default.is_clause_element:
values.append(
(c, self.process(
c.default.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.server_default is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
self.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif self.isupdate:
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, self.process(
c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
else:
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.server_onupdate is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
else:
self.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
if parameters and stmt_parameters:
check = set(parameters).intersection(
_column_as_key(k) for k in stmt.parameters
).difference(check_columns)
if check:
raise exc.CompileError(
"Unconsumed column names: %s" %
(", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values_0 = values
values = [values]
values.extend(
[
(
c,
(self._create_crud_bind_param(
c, row[c.key],
name="%s_%d" % (c.key, i + 1)
) if elements._is_literal(row[c.key])
else self.process(
row[c.key].self_group(), **kw))
if c.key in row else param
)
for (c, param) in values_0
]
for i, row in enumerate(stmt.parameters[1:])
)
return values
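    # Worked example (editorial comment, not in the original source): for a
    # hypothetical table t with an autoincrementing primary key "id" and a
    # plain column "name", on a dialect with implicit RETURNING, compiling
    # t.insert().values(name='x') returns roughly [(t.c.name, ':name')] and
    # appends t.c.id to self.returning; columns with Python-side defaults
    # land in self.prefetch and server-side defaults in self.postfetch.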
def visit_delete(self, delete_stmt, **kw):
self.stack.append({'correlate_froms': set([delete_stmt.table]),
"iswrapper": False,
"asfrom_froms": set([delete_stmt.table])})
self.isdelete = True
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = delete_stmt.table._compiler_dispatch(
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
delete_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if delete_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
delete_stmt.table,
dialect_hints[delete_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
self.returning = delete_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self)
if t:
text += " WHERE " + t
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
@property
def preparer(self):
return self.dialect.identifier_preparer
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.dialect.identifier_preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.dialect.identifier_preparer
text = "\n" + " ".join(['CREATE'] +
table._prefixes +
['TABLE',
preparer.format_table(table),
"("])
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s") %
(table.description, column.name, ce.args[0])
))
const = self.create_table_constraints(table)
if const:
text += ", \n\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint)
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(self, table):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key])
return ", \n\t".join(p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if include_schema and index.table is not None and index.table.schema:
schema = index.table.schema
schema_name = self.preparer.quote_schema(schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
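    # Editorial example (not original code): with a 30-character identifier
    # limit, a truncatable index name longer than 30 characters is cut to its
    # first (30 - 8) characters plus "_" plus the last 4 hex digits of its
    # md5, keeping the result under the limit while staying near-unique.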
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
self.preparer.format_constraint(drop.element),
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return "'%s'" % column.server_default.arg
else:
return self.sql_compiler.process(column.server_default.arg)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % constraint.sqltext
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.dialect.identifier_preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint._elements.values())[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint._elements.values()),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint._elements.values())
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_):
return "FLOAT"
def visit_REAL(self, type_):
return "REAL"
def visit_NUMERIC(self, type_):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_):
return "INTEGER"
def visit_SMALLINT(self, type_):
return "SMALLINT"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_TIMESTAMP(self, type_):
return 'TIMESTAMP'
def visit_DATETIME(self, type_):
return "DATETIME"
def visit_DATE(self, type_):
return "DATE"
def visit_TIME(self, type_):
return "TIME"
def visit_CLOB(self, type_):
return "CLOB"
def visit_NCLOB(self, type_):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
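    # Editorial example (not in the original source): a string type with
    # length=30 and collation='utf8', dispatched through visit_VARCHAR below,
    # renders as: VARCHAR(30) COLLATE "utf8"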
def visit_CHAR(self, type_):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_):
return "BLOB"
def visit_BINARY(self, type_):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_):
return "BOOLEAN"
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_boolean(self, type_):
return self.visit_BOOLEAN(type_)
def visit_time(self, type_):
return self.visit_TIME(type_)
def visit_datetime(self, type_):
return self.visit_DATETIME(type_)
def visit_date(self, type_):
return self.visit_DATE(type_)
def visit_big_integer(self, type_):
return self.visit_BIGINT(type_)
def visit_small_integer(self, type_):
return self.visit_SMALLINT(type_)
def visit_integer(self, type_):
return self.visit_INTEGER(type_)
def visit_real(self, type_):
return self.visit_REAL(type_)
def visit_float(self, type_):
return self.visit_FLOAT(type_)
def visit_numeric(self, type_):
return self.visit_NUMERIC(type_)
def visit_string(self, type_):
return self.visit_VARCHAR(type_)
def visit_unicode(self, type_):
return self.visit_VARCHAR(type_)
def visit_text(self, type_):
return self.visit_TEXT(type_)
def visit_unicode_text(self, type_):
return self.visit_TEXT(type_)
def visit_enum(self, type_):
return self.visit_VARCHAR(type_)
def visit_null(self, type_):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_):
return self.process(type_.type_engine(self.dialect))
def visit_user_defined(self, type_):
return type_.get_col_spec()
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
return value.replace(self.escape_quote, self.escape_to_quote)
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
the 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
the 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
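    # Editorial examples (not original code), assuming the default rules
    # above: quote('select') -> '"select"' (reserved word), quote('MyTable')
    # -> '"MyTable"' (not case-folded), quote('mytable') -> 'mytable'
    # (unquoted); results are memoized in self._strings.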
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
if (not self.omit_schema and use_schema and
sequence.schema is not None):
name = self.quote_schema(sequence.schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
return self.quote(name or savepoint.ident)
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table)
if name:
return self.quote(name)
elif isinstance(constraint.name, elements._defer_none_name):
return None
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
if not self.omit_schema and use_schema \
and getattr(table, "schema", None):
result = self.quote_schema(table.schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
if not self.omit_schema and use_schema and \
getattr(table, 'schema', None):
return (self.quote_schema(table.schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
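    # Editorial example (not original code): with the default double-quote
    # rules, unformat_identifiers('"my schema".mytable') yields roughly
    # ['my schema', 'mytable'].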
| bsd-3-clause |
archesproject/arches | arches/app/utils/data_management/sparql_providers/aat_provider.py | 1 | 5763 | """
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib.request, urllib.error, urllib.parse
from django.utils.translation import ugettext as _
from arches.app.models.models import DValueType
from arches.app.models.concept import Concept, ConceptValue
from arches.app.models.system_settings import settings
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from SPARQLWrapper import SPARQLWrapper, JSON
from .abstract_provider import Abstract_Provider
from rdflib.namespace import SKOS, DCTERMS
class AAT_Provider(Abstract_Provider):
def __init__(self, **kwargs):
super(AAT_Provider, self).__init__("http://vocab.getty.edu/sparql.json", **kwargs)
self.name = _("Getty AAT")
self.setReturnFormat(JSON)
def get_concepts(self, uris):
"""
Get a list of concepts given a list of AAT uris like http://vocab.getty.edu/aat/300380087
"""
default_lang = settings.LANGUAGE_CODE
dcterms_identifier_type = DValueType.objects.get(valuetype=str(DCTERMS.identifier).replace(str(DCTERMS), ""), namespace="dcterms")
concepts = []
langs = []
for lang in self.allowed_languages:
# the AAT expects language codes to be all lower case
langs.append('"%s"' % (lang.lower()))
for uri in uris.split(","):
query = """
SELECT ?value ?type WHERE {
{
<%s> skos:prefLabel ?value .
BIND('prefLabel' AS ?type)
}
UNION
{
<%s> skos:scopeNote [rdf:value ?value] .
BIND('scopeNote' AS ?type)
}
FILTER (lang(?value) in (%s))
}""" % (
uri,
uri,
",".join(langs),
)
results = self.perform_sparql_query(query)
if len(results["results"]["bindings"]) > 0:
concept = Concept()
concept.nodetype = "Concept"
for result in results["results"]["bindings"]:
concept.addvalue(
{"type": result["type"]["value"], "value": result["value"]["value"], "language": result["value"]["xml:lang"]}
)
concept.addvalue(
{
"value": uri,
"language": settings.LANGUAGE_CODE,
"type": dcterms_identifier_type.valuetype,
"category": dcterms_identifier_type.category,
}
)
concepts.append(concept)
else:
raise Exception(
_(
"<strong>Error in SPARQL query:</strong><br>Test this query directly by pasting the query below into the Getty's \
own SPARQL endpoint at <a href='http://vocab.getty.edu/sparql' target='_blank'>http://vocab.getty.edu/sparql</a> \
<i><pre>%s</pre></i>Query returned 0 results, please check the query for errors. \
You may need to add the appropriate languages into the database for this query to work<br><br>"
)
                % (query.replace("<", "&lt;").replace(">", "&gt;"))
)
return concepts
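    # Illustrative usage (editorial, not part of the original source):
    #   provider = AAT_Provider()
    #   concepts = provider.get_concepts("http://vocab.getty.edu/aat/300380087")
    # each returned Concept carries prefLabel/scopeNote values in the allowed
    # languages plus a dcterms identifier value holding the source URI.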
def search_for_concepts(self, terms):
query = """PREFIX dct: <http://purl.org/dc/terms/>
PREFIX gvp: <http://vocab.getty.edu/ontology#>
PREFIX gvp_lang: <http://vocab.getty.edu/language/>
PREFIX luc: <http://www.ontotext.com/owlim/lucene#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#>
SELECT ?Subject ?Term ?ScopeNote {
?Subject luc:term '%s*'; a skos:Concept; skos:inScheme aat:.
#?Subject rdf:type c.
#?typ rdfs:subClassOf gvp:Subject; rdfs:label ?Type.
optional {?Subject (gvp:prefLabelGVP | skos:prefLabel) [skosxl:literalForm ?Term]}
#optional {?Subject gvp:parentStringAbbrev ?Parents}
optional {?Subject skos:scopeNote [dct:language gvp_lang:en; rdf:value ?ScopeNote]}}""" % (
terms
)
results = self.perform_sparql_query(query)
return results
def perform_sparql_query(self, query):
self.setQuery(query)
# print query
# return HttpResponse(self.endpoint + '?' + self._getRequestEncodedParameters(("query", self.queryString)))
req = urllib.request.Request(self.endpoint + "?" + self._getRequestEncodedParameters(("query", self.queryString)))
req.add_header("Accept", "application/sparql-results+json")
f = urllib.request.urlopen(req)
return JSONDeserializer().deserialize(f.read())
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/urllib3/util/url.py | 203 | 6487 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
class Url(namedtuple('Url', url_attrs)):
"""
    Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
if scheme:
scheme = scheme.lower()
if host and scheme in NORMALIZABLE_SCHEMES:
host = host.lower()
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
    Scales linearly with the number of delims; not ideal for a large number
    of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers. No whitespace, no plus or
# minus prefixes, no non-integer digits such as ^2 (superscript).
if not port.isdigit():
raise LocationParseError(url)
try:
port = int(port)
except ValueError:
raise LocationParseError(url)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
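# Editorial examples (not part of the original source):
#   get_host('http://google.com/mail/')  -> ('http', 'google.com', None)
#   get_host('google.com:80')            -> ('http', 'google.com', 80)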
| gpl-3.0 |
LittleBun/ardupilot | mk/PX4/Tools/genmsg/scripts/genmsg_check_deps.py | 216 | 2999 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import sys
from genmsg import EXT_MSG, EXT_SRV, MsgContext
from genmsg.gentools import compute_full_type_name
from genmsg.msg_loader import load_msg_from_file, load_srv_from_file
from genmsg.msgs import bare_msg_type, is_builtin, resolve_type
pkg_name = sys.argv[1]
msg_file = sys.argv[2]
deps = sys.argv[3].split(':') if len(sys.argv) > 3 else []
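# Illustrative invocation (editorial comment; the paths are hypothetical):
#   genmsg_check_deps.py my_pkg /path/to/Foo.msg std_msgs/Header:geometry_msgs/Point
# i.e. package name, message/service file, and a colon-separated list of the
# dependency types recorded at the previous CMake run, as implied by the
# comparison against resolved_type below.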
msg_context = MsgContext.create_default()
full_type_name = compute_full_type_name(pkg_name, os.path.basename(msg_file))
if msg_file.endswith(EXT_MSG):
spec = load_msg_from_file(msg_context, msg_file, full_type_name)
unresolved_types = spec.types
elif msg_file.endswith(EXT_SRV):
spec = load_srv_from_file(msg_context, msg_file, full_type_name)
unresolved_types = spec.request.types + spec.response.types
else:
print("Processing file: '%s' - unknown file extension" % msg_file, file=sys.stderr)
sys.exit(1)
package_context = spec.package
for unresolved_type in unresolved_types:
bare_type = bare_msg_type(unresolved_type)
resolved_type = resolve_type(bare_type, package_context)
if not is_builtin(resolved_type) and resolved_type not in deps:
print("The dependencies of the message/service '%s' have changed. Please rerun cmake." % spec.full_name, file=sys.stderr)
sys.exit(1)
| gpl-3.0 |
ananswam/bioscrape | tests/test_randomized_delay.py | 1 | 3441 | import warnings
# We don't want warnings in dependencies to show up in bioscrape's tests.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np
import pylab as plt
import random
import pytest
import test_utils
from bioscrape.simulator import *
from bioscrape.types import *
# Seed RNG value. All tests use this value.
seed = 54173
all_delay_types = ["fixed", "gaussian", "gamma"]
delay_classes = {
"no": Delay,
"fixed": FixedDelay,
"gaussian": GaussianDelay,
"gamma": GammaDelay
}
delay_required_params = {
"fixed":["delay"],
"gaussian":["mean", "std"],
"gamma":["k", "theta"]
}
param_min = -4
param_max = 4
TEST_NAME = "random_delays"
def random_delay_model(delay_type):
'''
Returns a randomish model with a non-delayed reaction and a delay reaction.
Set to always return the same model, for any particular delay type.
WARNING: To produce consistent Models, this function resets the random seeds
used during Model construction. This may have unexpected effects on random
number generation outside this function as a side-effect.
'''
test_utils.set_seed(seed)
# Will always consider the reactions A-->B and A+B-->C, where only the
# first reaction has delay.
all_species = ["A", "B", "C"]
x0 = {"A": 25, "B": 5, "C": 0}
# A+B-->C
consol_k = round(np.exp(np.random.uniform(low = param_min,
high = param_max)), 3)
consolidation_rxn = (["A", "B"], ["C"], "massaction", {'k': consol_k})
# A-->B reaction, with delay.
delay_class = delay_classes[delay_type]
delay_k = round(np.exp(np.random.uniform(low = -1,
high = 1)), 3)
delay_params = dict()
for p in delay_required_params[delay_type]:
delay_params[p] = round(np.exp(np.random.uniform(low = param_min,
high = param_max)), 3)
conversion_rxn = (["A"], [], "massaction", {"k": delay_k}, delay_type,
[], ["B"], delay_params)
M = Model(reactions = [consolidation_rxn, conversion_rxn])
M.set_species(x0)
return M
# print("Simulating Delay =", delay_type, "params=", delay_params)
# results_s = py_simulate_model(timepoints, Model = M, stochastic = True, delay = True)
# plt.plot(timepoints, results_s["C"], label = "stochastic "+str(delay_type)+"params = "+str(delay_params), color = color_list[ind])
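# Illustrative usage (editorial, not part of the original test):
#   M = random_delay_model("gamma")
#   df = py_simulate_model(np.arange(0, 50, 0.01), Model=M, stochastic=True)
# the seed is re-set inside the factory, so repeated calls with the same
# delay_type reproduce the same Model.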
@pytest.mark.parametrize('delay_type', all_delay_types)
def test_delay_model(delay_type):
test_results = dict()
model = random_delay_model(delay_type)
timepoints = np.arange(0, 50, 0.01)
results_s = py_simulate_model(timepoints, Model = model, stochastic = True, return_dataframe = False).py_get_result()
test_results[delay_type + "_stochastic"] = results_s
test_utils.check_sim_results(TEST_NAME, test_results)
## Uncomment this when SBML-writing has been implemented for delay reactions.
# @pytest.mark.parametrize('delay_type', all_delay_types)
# def test_random_propensity_sbml(delay_type):
# model_dict = dict()
# model_dict[delay_type] = random_delay_model(delay_type)
# test_utils.check_sbml_IO(TEST_NAME, model_dict)
| mit |
Gaia3D/QGIS | python/plugins/processing/algs/lidar/lastools/blast2demPro.py | 9 | 3836 | # -*- coding: utf-8 -*-
"""
***************************************************************************
blast2demPro.py
---------------------
Date : October 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class blast2demPro(LAStoolsAlgorithm):
ATTRIBUTE = "ATTRIBUTE"
PRODUCT = "PRODUCT"
ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"]
PRODUCTS = ["actual values", "hillshade", "gray", "false"]
USE_TILE_BB = "USE_TILE_BB"
def defineCharacteristics(self):
self.name = "blast2demPro"
self.group = "LAStools Production"
self.addParametersPointInputFolderGUI()
self.addParametersPointInputMergedGUI()
self.addParametersFilter1ReturnClassFlagsGUI()
self.addParametersStepGUI()
self.addParameter(ParameterSelection(blast2demPro.ATTRIBUTE,
self.tr("Attribute"), blast2demPro.ATTRIBUTES, 0))
self.addParameter(ParameterSelection(blast2demPro.PRODUCT,
self.tr("Product"), blast2demPro.PRODUCTS, 0))
self.addParameter(ParameterBoolean(blast2demPro.USE_TILE_BB,
self.tr("Use tile bounding box (after tiling with buffer)"), False))
self.addParametersOutputDirectoryGUI()
self.addParametersOutputAppendixGUI()
self.addParametersRasterOutputFormatGUI()
self.addParametersRasterOutputGUI()
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "blast2dem")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
self.addParametersPointInputMergedCommands(commands)
self.addParametersFilter1ReturnClassFlagsCommands(commands)
self.addParametersStepCommands(commands)
attribute = self.getParameterValue(blast2demPro.ATTRIBUTE)
if attribute != 0:
commands.append("-" + blast2demPro.ATTRIBUTES[attribute])
product = self.getParameterValue(blast2demPro.PRODUCT)
if product != 0:
commands.append("-" + blast2demPro.PRODUCTS[product])
if (self.getParameterValue(blast2demPro.USE_TILE_BB)):
commands.append("-use_tile_bb")
self.addParametersOutputDirectoryCommands(commands)
self.addParametersOutputAppendixCommands(commands)
self.addParametersRasterOutputFormatCommands(commands)
self.addParametersRasterOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
JetChars/vim | vim/bundle/python-mode/pymode/libs3/rope/refactor/suites.py | 75 | 4376 | from rope.base import ast
def find_visible(node, lines):
"""Return the line which is visible from all `lines`"""
root = ast_suite_tree(node)
return find_visible_for_suite(root, lines)
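# Editorial example (not in the original source): if `lines` holds one line
# from an `if` suite and one from its `else` suite, both suites climb to
# their common parent and the minimum of their starting lines is returned,
# i.e. the line holding the `if` statement itself.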
def find_visible_for_suite(root, lines):
if len(lines) == 1:
return lines[0]
line1 = lines[0]
line2 = find_visible_for_suite(root, lines[1:])
suite1 = root.find_suite(line1)
suite2 = root.find_suite(line2)
def valid(suite):
return suite is not None and not suite.ignored
if valid(suite1) and not valid(suite2):
return line1
if not valid(suite1) and valid(suite2):
return line2
if not valid(suite1) and not valid(suite2):
return None
while suite1 != suite2 and suite1.parent != suite2.parent:
if suite1._get_level() < suite2._get_level():
line2 = suite2.get_start()
suite2 = suite2.parent
elif suite1._get_level() > suite2._get_level():
line1 = suite1.get_start()
suite1 = suite1.parent
else:
line1 = suite1.get_start()
line2 = suite2.get_start()
suite1 = suite1.parent
suite2 = suite2.parent
if suite1 == suite2:
return min(line1, line2)
return min(suite1.get_start(), suite2.get_start())
def ast_suite_tree(node):
if hasattr(node, 'lineno'):
lineno = node.lineno
else:
lineno = 1
return Suite(node.body, lineno)
class Suite(object):
def __init__(self, child_nodes, lineno, parent=None, ignored=False):
self.parent = parent
self.lineno = lineno
self.child_nodes = child_nodes
self._children = None
self.ignored = ignored
def get_start(self):
if self.parent is None:
if self.child_nodes:
return self.local_start()
else:
return 1
return self.lineno
def get_children(self):
if self._children is None:
walker = _SuiteWalker(self)
for child in self.child_nodes:
ast.walk(child, walker)
self._children = walker.suites
return self._children
def local_start(self):
return self.child_nodes[0].lineno
def local_end(self):
end = self.child_nodes[-1].lineno
if self.get_children():
end = max(end, self.get_children()[-1].local_end())
return end
def find_suite(self, line):
if line is None:
return None
for child in self.get_children():
if child.local_start() <= line <= child.local_end():
return child.find_suite(line)
return self
def _get_level(self):
if self.parent is None:
return 0
return self.parent._get_level() + 1
class _SuiteWalker(object):
def __init__(self, suite):
self.suite = suite
self.suites = []
def _If(self, node):
self._add_if_like_node(node)
def _For(self, node):
self._add_if_like_node(node)
def _While(self, node):
self._add_if_like_node(node)
def _With(self, node):
self.suites.append(Suite(node.body, node.lineno, self.suite))
def _TryFinally(self, node):
if len(node.finalbody) == 1 and \
isinstance(node.body[0], ast.TryExcept):
self._TryExcept(node.body[0])
else:
self.suites.append(Suite(node.body, node.lineno, self.suite))
self.suites.append(Suite(node.finalbody, node.lineno, self.suite))
def _TryExcept(self, node):
self.suites.append(Suite(node.body, node.lineno, self.suite))
for handler in node.handlers:
self.suites.append(Suite(handler.body, node.lineno, self.suite))
if node.orelse:
self.suites.append(Suite(node.orelse, node.lineno, self.suite))
def _add_if_like_node(self, node):
self.suites.append(Suite(node.body, node.lineno, self.suite))
if node.orelse:
self.suites.append(Suite(node.orelse, node.lineno, self.suite))
def _FunctionDef(self, node):
self.suites.append(Suite(node.body, node.lineno,
self.suite, ignored=True))
def _ClassDef(self, node):
self.suites.append(Suite(node.body, node.lineno,
self.suite, ignored=True))
| apache-2.0 |
dagss/numpy_svn | numpy/oldnumeric/matrix.py | 88 | 1604 | # This module is for compatibility only.
__all__ = ['UserArray', 'squeeze', 'Matrix', 'asarray', 'dot', 'k', 'Numeric', 'LinearAlgebra', 'identity', 'multiply', 'types', 'string']
import types
from user_array import UserArray, asarray
import numpy.oldnumeric as Numeric
from numpy.oldnumeric import dot, identity, multiply
import numpy.oldnumeric.linear_algebra as LinearAlgebra
from numpy import matrix as Matrix, squeeze
# Hidden names that will be the same.
_table = [None]*256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_numchars = '0123456789.-+jeEL'
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
def _eval(astr):
return eval(astr.translate(_table,_todelete))
def _convert_from_string(data):
data.find
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval,temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError, "Rows not the same size."
count += 1
newdata.append(newrow)
return newdata
_lkup = {'0':'000',
'1':'001',
'2':'010',
'3':'011',
'4':'100',
'5':'101',
'6':'110',
'7':'111'}
def _binary(num):
ostr = oct(num)
bin = ''
for ch in ostr[1:]:
bin += _lkup[ch]
ind = 0
while bin[ind] == '0':
ind += 1
return bin[ind:]
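# Editorial example (not original code): _binary(10) maps the octal digits of
# oct(10) ('012', with the leading '0' skipped) to '001' + '010', then strips
# the leading zeros: '1010'.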
| bsd-3-clause |
bdastur/pyvim | .vim/bundle/python-mode/pymode/libs3/rope/base/resourceobserver.py | 91 | 10266 | import os
class ResourceObserver(object):
"""Provides the interface for observing resources
`ResourceObserver`\s can be registered using `Project.
add_observer()`. But most of the time `FilteredResourceObserver`
should be used. `ResourceObserver`\s report all changes passed
to them and they don't report changes to all resources. For
example if a folder is removed, it only calls `removed()` for that
folder and not its contents. You can use
`FilteredResourceObserver` if you are interested in changes only
to a list of resources. And you want changes to be reported on
individual resources.
"""
def __init__(self, changed=None, moved=None, created=None,
removed=None, validate=None):
self.changed = changed
self.moved = moved
self.created = created
self.removed = removed
self._validate = validate
def resource_changed(self, resource):
"""It is called when the resource changes"""
if self.changed is not None:
self.changed(resource)
def resource_moved(self, resource, new_resource):
"""It is called when a resource is moved"""
if self.moved is not None:
self.moved(resource, new_resource)
def resource_created(self, resource):
"""Is called when a new resource is created"""
if self.created is not None:
self.created(resource)
def resource_removed(self, resource):
"""Is called when a new resource is removed"""
if self.removed is not None:
self.removed(resource)
def validate(self, resource):
"""Validate the existence of this resource and its children.
        This function is called when rope needs to update its resource
cache about the files that might have been changed or removed
by other processes.
"""
if self._validate is not None:
self._validate(resource)
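# Illustrative usage (editorial, not part of the original source):
#   observer = ResourceObserver(changed=lambda resource: print(resource.path))
#   project.add_observer(FilteredResourceObserver(observer, [some_file]))
# where `project` and `some_file` are hypothetical; the filtered wrapper
# below then re-dispatches only changes touching the registered resources.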
class FilteredResourceObserver(object):
"""A useful decorator for `ResourceObserver`
Most resource observers have a list of resources and are
interested only in changes to those files. This class satisfies
this need. It dispatches resource changed and removed messages.
It performs these tasks:
* Changes to files and folders are analyzed to check whether any
of the interesting resources are changed or not. If they are,
it reports these changes to `resource_observer` passed to the
constructor.
* When a resource is removed it checks whether any of the
interesting resources are contained in that folder and reports
them to `resource_observer`.
* When validating a folder it validates all of the interesting
files in that folder.
Since most resource observers are interested in a list of
resources that change over time, `add_resource` and
`remove_resource` might be useful.
"""
def __init__(self, resource_observer, initial_resources=None,
timekeeper=None):
self.observer = resource_observer
self.resources = {}
if timekeeper is not None:
self.timekeeper = timekeeper
else:
self.timekeeper = ChangeIndicator()
if initial_resources is not None:
for resource in initial_resources:
self.add_resource(resource)
def add_resource(self, resource):
"""Add a resource to the list of interesting resources"""
if resource.exists():
self.resources[resource] = self.timekeeper.get_indicator(resource)
else:
self.resources[resource] = None
def remove_resource(self, resource):
"""Add a resource to the list of interesting resources"""
if resource in self.resources:
del self.resources[resource]
def clear_resources(self):
"""Removes all registered resources"""
self.resources.clear()
def resource_changed(self, resource):
changes = _Changes()
self._update_changes_caused_by_changed(changes, resource)
self._perform_changes(changes)
def _update_changes_caused_by_changed(self, changes, changed):
if changed in self.resources:
changes.add_changed(changed)
if self._is_parent_changed(changed):
changes.add_changed(changed.parent)
def _update_changes_caused_by_moved(self, changes, resource,
new_resource=None):
if resource in self.resources:
changes.add_removed(resource, new_resource)
if new_resource in self.resources:
changes.add_created(new_resource)
if resource.is_folder():
for file in list(self.resources):
if resource.contains(file):
new_file = self._calculate_new_resource(
resource, new_resource, file)
changes.add_removed(file, new_file)
if self._is_parent_changed(resource):
changes.add_changed(resource.parent)
if new_resource is not None:
if self._is_parent_changed(new_resource):
changes.add_changed(new_resource.parent)
def _is_parent_changed(self, child):
return child.parent in self.resources
def resource_moved(self, resource, new_resource):
changes = _Changes()
self._update_changes_caused_by_moved(changes, resource, new_resource)
self._perform_changes(changes)
def resource_created(self, resource):
changes = _Changes()
self._update_changes_caused_by_created(changes, resource)
self._perform_changes(changes)
def _update_changes_caused_by_created(self, changes, resource):
if resource in self.resources:
changes.add_created(resource)
if self._is_parent_changed(resource):
changes.add_changed(resource.parent)
def resource_removed(self, resource):
changes = _Changes()
self._update_changes_caused_by_moved(changes, resource)
self._perform_changes(changes)
def _perform_changes(self, changes):
for resource in changes.changes:
self.observer.resource_changed(resource)
self.resources[resource] = self.timekeeper.get_indicator(resource)
for resource, new_resource in changes.moves.items():
self.resources[resource] = None
if new_resource is not None:
self.observer.resource_moved(resource, new_resource)
else:
self.observer.resource_removed(resource)
for resource in changes.creations:
self.observer.resource_created(resource)
self.resources[resource] = self.timekeeper.get_indicator(resource)
def validate(self, resource):
changes = _Changes()
for file in self._search_resource_moves(resource):
if file in self.resources:
self._update_changes_caused_by_moved(changes, file)
for file in self._search_resource_changes(resource):
if file in self.resources:
self._update_changes_caused_by_changed(changes, file)
for file in self._search_resource_creations(resource):
if file in self.resources:
changes.add_created(file)
self._perform_changes(changes)
def _search_resource_creations(self, resource):
creations = set()
if resource in self.resources and resource.exists() and \
self.resources[resource] is None:
creations.add(resource)
if resource.is_folder():
for file in self.resources:
if file.exists() and resource.contains(file) and \
self.resources[file] is None:
creations.add(file)
return creations
def _search_resource_moves(self, resource):
all_moved = set()
if resource in self.resources and not resource.exists():
all_moved.add(resource)
if resource.is_folder():
for file in self.resources:
if resource.contains(file):
if not file.exists():
all_moved.add(file)
moved = set(all_moved)
for folder in [file for file in all_moved if file.is_folder()]:
if folder in moved:
for file in list(moved):
if folder.contains(file):
moved.remove(file)
return moved
def _search_resource_changes(self, resource):
changed = set()
if resource in self.resources and self._is_changed(resource):
changed.add(resource)
if resource.is_folder():
for file in self.resources:
if file.exists() and resource.contains(file):
if self._is_changed(file):
changed.add(file)
return changed
def _is_changed(self, resource):
if self.resources[resource] is None:
return False
return self.resources[resource] != self.timekeeper.get_indicator(resource)
def _calculate_new_resource(self, main, new_main, resource):
if new_main is None:
return None
diff = resource.path[len(main.path):]
return resource.project.get_resource(new_main.path + diff)
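# A minimal usage sketch (added for illustration; ``project``, ``my_file`` and
# the callback names are hypothetical, not part of this module). A filtered
# observer wraps a plain ResourceObserver and only forwards events for the
# resources it was told about:
#
#   def on_changed(resource):
#       print('changed:', resource.path)
#
#   def on_removed(resource):
#       print('removed:', resource.path)
#
#   inner = ResourceObserver(changed=on_changed, removed=on_removed)
#   observer = FilteredResourceObserver(inner, initial_resources=[my_file])
#   project.add_observer(observer)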
class ChangeIndicator(object):
def get_indicator(self, resource):
"""Return the modification time and size of a `Resource`."""
path = resource.real_path
# on dos, mtime does not change for a folder when files are added
if os.name != 'posix' and os.path.isdir(path):
return (os.path.getmtime(path),
len(os.listdir(path)),
os.path.getsize(path))
return (os.path.getmtime(path),
os.path.getsize(path))
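# Illustrative note: for a plain file the indicator is an (mtime, size) tuple,
# e.g. (1436128395.0, 2048); on non-posix systems a folder additionally gets
# its entry count so that added files change the indicator. Two indicators
# comparing unequal is what FilteredResourceObserver._is_changed() relies on.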
class _Changes(object):
def __init__(self):
self.changes = set()
self.creations = set()
self.moves = {}
def add_changed(self, resource):
self.changes.add(resource)
def add_removed(self, resource, new_resource=None):
self.moves[resource] = new_resource
def add_created(self, resource):
self.creations.add(resource)
| apache-2.0 |
shreyasva/tensorflow | tensorflow/python/ops/linalg_ops.py | 4 | 11307 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_linalg_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
@ops.RegisterShape("Cholesky")
def _CholeskyShape(op):
input_shape = op.inputs[0].get_shape().with_rank(2)
# The matrix must be square.
input_shape[0].assert_is_compatible_with(input_shape[1])
return [input_shape]
@ops.RegisterShape("BatchCholesky")
def _BatchCholeskyShape(op):
input_shape = op.inputs[0].get_shape().with_rank_at_least(3)
# The matrices in the batch must be square.
input_shape[-1].assert_is_compatible_with(input_shape[-2])
return [input_shape]
@ops.RegisterShape("MatrixDeterminant")
def _MatrixDeterminantShape(op):
input_shape = op.inputs[0].get_shape().with_rank(2)
# The matrix must be square.
input_shape[0].assert_is_compatible_with(input_shape[1])
if input_shape.ndims is not None:
return [tensor_shape.scalar()]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("BatchMatrixDeterminant")
def _BatchMatrixDeterminantShape(op):
input_shape = op.inputs[0].get_shape().with_rank_at_least(3)
# The matrices in the batch must be square.
input_shape[-1].assert_is_compatible_with(input_shape[-2])
if input_shape.ndims is not None:
return [input_shape[:-2]]
else:
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("MatrixInverse")
def _MatrixInverseShape(op):
input_shape = op.inputs[0].get_shape().with_rank(2)
# The matrix must be square.
input_shape[0].assert_is_compatible_with(input_shape[1])
return [input_shape]
@ops.RegisterShape("BatchMatrixInverse")
def _BatchMatrixInverseShape(op):
input_shape = op.inputs[0].get_shape().with_rank_at_least(3)
# The matrices in the batch must be square.
input_shape[-1].assert_is_compatible_with(input_shape[-2])
return [input_shape]
@ops.RegisterShape("SelfAdjointEig")
def _SelfAdjointEigShape(op):
input_shape = op.inputs[0].get_shape().with_rank(2)
# The matrix must be square.
input_shape[0].assert_is_compatible_with(input_shape[1])
d = input_shape.dims[0]
out_shape = tensor_shape.TensorShape([d+1, d])
return [out_shape]
@ops.RegisterShape("BatchSelfAdjointEig")
def _BatchSelfAdjointEigShape(op):
input_shape = op.inputs[0].get_shape().with_rank_at_least(3)
# The matrices in the batch must be square.
input_shape[-1].assert_is_compatible_with(input_shape[-2])
dlist = input_shape.dims
dlist[-2] += 1
out_shape = tensor_shape.TensorShape(dlist)
return [out_shape]
@ops.RegisterShape("MatrixSolve")
def _MatrixSolveShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank(2)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(2)
# The matrix must be square.
lhs_shape[0].assert_is_compatible_with(lhs_shape[1])
# The matrix and right-hand side must have the same number of rows.
lhs_shape[0].assert_is_compatible_with(rhs_shape[0])
return [[lhs_shape[1], rhs_shape[1]]]
@ops.RegisterShape("BatchMatrixSolve")
def _BatchMatrixSolveShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank_at_least(3)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(3)
# The matrices must be square.
lhs_shape[-1].assert_is_compatible_with(lhs_shape[-2])
# The matrices and right-hand sides in the batch must have the same number of
# rows.
lhs_shape[-2].assert_is_compatible_with(rhs_shape[-2])
return [lhs_shape[:-2].concatenate(rhs_shape[-1])]
@ops.RegisterShape("MatrixTriangularSolve")
def _MatrixTriangularSolveShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank(2)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(2)
# The matrix must be square.
lhs_shape[0].assert_is_compatible_with(lhs_shape[1])
# The matrix and right-hand side must have the same number of rows.
lhs_shape[0].assert_is_compatible_with(rhs_shape[0])
return [rhs_shape]
@ops.RegisterShape("BatchMatrixTriangularSolve")
def _BatchMatrixTriangularSolveShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank_at_least(3)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(3)
# The matrices must be square.
lhs_shape[-1].assert_is_compatible_with(lhs_shape[-2])
# The matrices and right-hand sides in the batch must have the same number of
# rows.
lhs_shape[-2].assert_is_compatible_with(rhs_shape[-2])
return [rhs_shape]
@ops.RegisterShape("MatrixSolveLs")
def _MatrixSolveLsShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank(2)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(2)
# The matrix and right-hand side must have the same number of rows.
lhs_shape[0].assert_is_compatible_with(rhs_shape[0])
return [[lhs_shape[1], rhs_shape[1]]]
@ops.RegisterShape("BatchMatrixSolveLs")
def _BatchMatrixSolveLsShape(op):
lhs_shape = op.inputs[0].get_shape().with_rank_at_least(3)
rhs_shape = op.inputs[1].get_shape().with_rank_at_least(3)
# The matrices and right-hand sides in the batch must have the same number of
# rows.
lhs_shape[-2].assert_is_compatible_with(rhs_shape[-2])
return [lhs_shape[:-3].concatenate([lhs_shape[-1], rhs_shape[-1]])]
# pylint: disable=invalid-name
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
r"""Solves a linear least-squares problem.
Below we will use the following notation
`matrix`=\\(A \in \Re^{m \times n}\\),
`rhs`=\\(B \in \Re^{m \times k}\\),
`output`=\\(X \in \Re^{n \times k}\\),
`l2_regularizer`=\\(\lambda\\).
If `fast` is `True`, then the solution is computed by solving the normal
equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
\\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the regularized
least-squares problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}}
||A Z - B||_F^2 + \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is
computed as \\(X = A^T (A A^T + \lambda I)^{-1} B\\),
which (for \\(\lambda = 0\\)) is the minimum-norm solution to the
under-determined linear system, i.e.
\\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\),
subject to \\(A Z = B\\).
Notice that the fast path is only numerically stable when \\(A\\) is
numerically full rank and has a condition number
\\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\)
or \\(\lambda\\) is sufficiently large.
If `fast` is `False` then the solution is computed using the rank revealing
QR decomposition with column pivoting. This will always compute a
least-squares solution that minimizes the residual norm
\\(||A X - B||_F^2 \\), even when \\(A\\) is rank deficient or
ill-conditioned. Notice: The current version does not compute a minimum norm
solution. If `fast` is `False` then `l2_regularizer` is ignored.
Args:
matrix: 2-D `Tensor` of shape `[M, N]`.
rhs: 2-D `Tensor` of shape `[M, K]`.
l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
fast: bool. Defaults to `True`.
name: string, optional name of the operation.
Returns:
output: Matrix of shape `[N, K]` containing the matrix that solves
`matrix * output = rhs` in the least-squares sense.
"""
return gen_linalg_ops.matrix_solve_ls(matrix,
rhs,
l2_regularizer,
fast=fast,
name=name)
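# A minimal usage sketch (illustrative only; assumes this op is exported as
# tf.matrix_solve_ls, which may differ between TensorFlow releases):
#
#   import tensorflow as tf
#   A = tf.constant([[1., 2.], [3., 4.], [5., 6.]])  # shape [3, 2]
#   B = tf.constant([[1.], [2.], [3.]])              # shape [3, 1]
#   X = tf.matrix_solve_ls(A, B, l2_regularizer=0.0, fast=True)
#   # X has shape [2, 1] and minimizes ||A X - B||_F^2.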
def batch_matrix_solve_ls(matrix,
rhs,
l2_regularizer=0.0,
fast=True,
name=None):
r"""Solves multiple linear least-squares problems.
`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
`Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
sense.
Below we will use the following notation for each pair of
matrix and right-hand sides in the batch:
`matrix`=\\(A \in \Re^{m \times n}\\),
`rhs`=\\(B \in \Re^{m \times k}\\),
`output`=\\(X \in \Re^{n \times k}\\),
`l2_regularizer`=\\(\lambda\\).
If `fast` is `True`, then the solution is computed by solving the normal
equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
\\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
\\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
the minimum-norm solution to the under-determined linear system, i.e.
\\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
\\(A Z = B\\). Notice that the fast path is only numerically stable when
\\(A\\) is numerically full rank and has a condition number
\\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\)
is sufficiently large.
If `fast` is `False` then the solution is computed using the rank revealing
QR decomposition with column pivoting. This will always compute a
least-squares solution that minimizes the residual norm \\(||A X - B||_F^2\\),
even when \\(A\\) is rank deficient or ill-conditioned. Notice: The current
version does not compute a minimum norm solution. If `fast` is `False` then
`l2_regularizer` is ignored.
Args:
matrix: `Tensor` of shape `[..., M, N]`.
rhs: `Tensor` of shape `[..., M, K]`.
l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
fast: bool. Defaults to `True`.
name: string, optional name of the operation.
Returns:
output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
`M`-by-`K` matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
squares sense.
"""
return gen_linalg_ops.batch_matrix_solve_ls(matrix,
rhs,
l2_regularizer,
fast=fast,
name=name)
# pylint: enable=invalid-name
| apache-2.0 |
jmartinm/InvenioAuthorLists | modules/websubmit/lib/functions/Move_Uploaded_Files_to_Storage.py | 8 | 3024 | ## $Id: Move_Revised_Files_to_Storage.py,v 1.20 2009/03/26 13:48:42 jerome Exp $
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Archives files uploaded with the upload file
interface.
To be used on par with Create_Upload_Files_Interface.py function:
- Create_Upload_Files_Interface records the actions performed by user.
- Move_Uploaded_Files_to_Storage executes the recorded actions.
NOTE:
=====
- Due to the way WebSubmit works, this function can only work when
positioned at step 2 in WebSubmit admin, and
Create_Upload_Files_Interface is at step 1
"""
__revision__ = "$Id$"
from invenio import websubmit_managedocfiles
def Move_Uploaded_Files_to_Storage(parameters, curdir, form, user_info=None):
"""
The function moves files uploaded using the
Create_Upload_Files_Interface.py function.
It reads the action previously performed by the user on the files
and calls the corresponding functions of bibdocfile.
@param parameters:(dictionary) - must contain:
+ iconsizes: sizes of the icons to create (when applicable),
separated by commas. Eg: 180>,700>
+ createIconDoctypes: the list of doctypes for which an icon
should be created.
Eg:
Figure|Graph
('|' separated values)
Use '*' for all doctypes
+ forceFileRevision: when revising attributes of a file
(comment, description) without
uploading a new file, force a revision of
the current version (so that old comment,
description, etc. is kept) (1) or not (0).
"""
global sysno
recid = int(sysno)
iconsize = parameters.get('iconsize').split(',')
create_icon_doctypes = parameters.get('createIconDoctypes').split('|')
force_file_revision = (parameters.get('forceFileRevision') == '1')
websubmit_managedocfiles.move_uploaded_files_to_storage(curdir,
recid, iconsize,
create_icon_doctypes,
force_file_revision)
| gpl-2.0 |
diego-plan9/beets | test/test_importfeeds.py | 16 | 2214 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import os
import os.path
import tempfile
import shutil
import unittest
from beets import config
from beets.library import Item, Album, Library
from beetsplug.importfeeds import ImportFeedsPlugin
class ImportfeedsTestTest(unittest.TestCase):
def setUp(self):
config.clear()
config.read(user=False)
self.importfeeds = ImportFeedsPlugin()
self.lib = Library(':memory:')
self.feeds_dir = tempfile.mkdtemp()
config['importfeeds']['dir'] = self.feeds_dir
def tearDown(self):
shutil.rmtree(self.feeds_dir)
def test_multi_format_album_playlist(self):
config['importfeeds']['formats'] = 'm3u_multi'
album = Album(album='album/name', id=1)
item_path = os.path.join('path', 'to', 'item')
item = Item(title='song', album_id=1, path=item_path)
self.lib.add(album)
self.lib.add(item)
self.importfeeds.album_imported(self.lib, album)
playlist_path = os.path.join(self.feeds_dir,
os.listdir(self.feeds_dir)[0])
self.assertTrue(playlist_path.endswith('album_name.m3u'))
with open(playlist_path) as playlist:
self.assertIn(item_path, playlist.read())
def test_playlist_in_subdir(self):
config['importfeeds']['formats'] = 'm3u'
config['importfeeds']['m3u_name'] = \
os.path.join('subdir', 'imported.m3u')
album = Album(album='album/name', id=1)
item_path = os.path.join('path', 'to', 'item')
item = Item(title='song', album_id=1, path=item_path)
self.lib.add(album)
self.lib.add(item)
self.importfeeds.album_imported(self.lib, album)
playlist = os.path.join(self.feeds_dir,
config['importfeeds']['m3u_name'].get())
playlist_subdir = os.path.dirname(playlist)
self.assertTrue(os.path.isdir(playlist_subdir))
self.assertTrue(os.path.isfile(playlist))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit |
gnu3ra/SCC15HPCRepast | INSTALLATION/boost_1_54_0/tools/build/v2/test/test_rc.py | 38 | 3914 | #!/usr/bin/python
# Copyright 2012 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests rc toolset behaviour.
import BoostBuild
def included_resource_newer_than_rc_script():
"""
When a .rc script file includes another resource file - the resource file
being newer than the .rc script file should not cause the .rc script file
to be considered old and force all of its dependents to rebuild.
"""
toolsetName = "__myDummyResourceCompilerToolset__"
# Used options rationale:
#
# -d4 & --debug-configuration
# Display additional information in case of test failure. In the past
# we have had testing system issues causing this test to fail
# sporadically for which -d+3 output had been instrumental in getting to
# the root cause (a touched file's timestamp was not as new as it should
# have been).
#
# --ignore-site-config --user-config=
# Disable reading any external Boost Build configuration. This test is
# self sufficient so these options protect it from being adversely
# affected by any local (mis)configuration.
t = BoostBuild.Tester(["-d4", "--debug-configuration",
"--ignore-site-config", "--user-config=", "toolset=%s" % toolsetName],
pass_d0=False, pass_toolset=False, use_test_config=False,
translate_suffixes=False)
# Prepare a dummy toolset so we do not get errors in case the default one
# is not found and that we can test rc.jam functionality without having to
# depend on the externally specified toolset actually supporting it exactly
# the way it is required for this test, e.g. gcc toolset, under some
# circumstances, uses a quiet action for generating its null RC targets.
t.write(toolsetName + ".jam", """\
import feature ;
import rc ;
import type ;
local toolset-name = "%s" ;
feature.extend toolset : $(toolset-name) ;
rule init ( ) { }
rc.configure dummy-rc-command : <toolset>$(toolset-name) : <rc-type>dummy ;
module rc
{
rule compile.resource.dummy ( targets * : sources * : properties * )
{
import common ;
.TOUCH on $(targets) = [ common.file-touch-command ] ;
}
actions compile.resource.dummy { $(.TOUCH) "$(<)" }
}
# Make OBJ files generated by our toolset use the "obj" suffix on all
# platforms. We need to do this explicitly for <target-os> windows & cygwin to
# override the default OBJ type configuration (otherwise we would get
# 'ambiguous key' errors on those platforms).
local rule set-generated-obj-suffix ( target-os ? )
{
type.set-generated-target-suffix OBJ : <toolset>$(toolset-name)
<target-os>$(target-os) : obj ;
}
set-generated-obj-suffix ;
set-generated-obj-suffix windows ;
set-generated-obj-suffix cygwin ;
""" % toolsetName)
# Prepare project source files.
t.write("jamroot.jam", """\
ECHO {{{ [ modules.peek : XXX ] [ modules.peek : NOEXEC ] }}} ;
obj xxx : xxx.rc ;
""")
t.write("xxx.rc", '1 MESSAGETABLE "xxx.bin"\n')
t.write("xxx.bin", "foo")
def test1(n, expect, noexec=False):
params = ["-sXXX=%d" % n]
if noexec:
params.append("-n")
params.append("-sNOEXEC=NOEXEC")
t.run_build_system(params)
t.expect_output_lines("*NOEXEC*", noexec)
obj_file = "xxx_res.obj"
t.expect_output_lines("compile.resource.dummy *%s" % obj_file, expect)
if expect and not noexec:
expect("bin/%s/debug/%s" % (toolsetName, obj_file))
t.expect_nothing_more()
def test(n, expect):
test1(n, expect, noexec=True)
test1(n, expect)
test(1, t.expect_addition)
test(2, None)
t.touch("xxx.bin")
test(3, t.expect_touch)
test(4, None)
t.cleanup()
included_resource_newer_than_rc_script()
| bsd-3-clause |
smarthomeNG/smarthome | lib/scheduler.py | 2 | 38055 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2011-2014 Marcus Popp [email protected]
# Copyright 2016- Christian Straßburg
# Copyright 2017 Bernd Meiners [email protected]
#########################################################################
# This file is part of SmartHomeNG
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import logging
import time
import datetime
import calendar
import sys
import traceback
import threading
import random
import inspect
import copy
from lib.shtime import Shtime
from lib.item import Items
from lib.model.smartplugin import SmartPlugin
import dateutil.relativedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
from dateutil.tz import tzutc
# following modules are imported to have those functions available during logic execution
import gc # noqa
import os
import math
import types
import subprocess
try:
from lib.module import Modules
_lib_modules_found = True
except:
_lib_modules_found = False
logger = logging.getLogger(__name__)
_scheduler_instance = None # Pointer to the initialized instance of the scheduler class (for use by static methods)
class _PriorityQueue:
"""
Implements a queue which contains tuples of priority and data, sorted by priority.
The lowest priority value will be the first candidate for a get from the queue; data can be anything
"""
def __init__(self):
self.queue = []
self.lock = threading.Lock()
def insert(self, priority, data):
"""
Add a tuple with priority and data into the queue
:param priority: a positive integer or a tuple where the lowest value indicates the highest priority
:param data: anything to be associated with the given priority
"""
self.lock.acquire()
lo = 0
hi = len(self.queue)
while lo < hi:
mid = (lo + hi) // 2
if priority < self.queue[mid][0]:
hi = mid
else:
lo = mid + 1
self.queue.insert(lo, (priority, data))
self.lock.release()
def get(self):
"""
Returns the first tuple of the queue
:return: tuple with priority and data or None if no entry is available in the queue
"""
self.lock.acquire()
try:
return self.queue.pop(0)
except IndexError:
raise
finally:
self.lock.release()
def qsize(self):
"""
Returns the actual size of the queue
:return: Size of the queue
"""
return len(self.queue)
def dump(self):
"""
Returns all entries of the queue as a list
:return: list of all queue entries
"""
queue_list = []
self.lock.acquire()
for entry in self.queue:
queue_list.append(entry)
self.lock.release()
return queue_list
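# A minimal usage sketch of _PriorityQueue (illustrative; lower priority
# values are served first, matching the scheduler's prio semantics):
#
#   q = _PriorityQueue()
#   q.insert(3, 'low priority task')
#   q.insert(1, 'high priority task')
#   q.get()    # -> (1, 'high priority task')
#   q.qsize()  # -> 1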
class Scheduler(threading.Thread):
_workers = []
_worker_num = 5
_worker_max = 20
_worker_delta = 60 # wait 60 seconds before adding another worker thread
_scheduler = {} # holds schedulers, key is the scheduler name. Each scheduler is stored in a dict
# (keys are 'obj', 'active', 'prio', 'next', 'value', 'cycle', 'cron')
_runq = _PriorityQueue() # holds priority and a tuple of (name, obj, by, source, dest, value) for immediate execution
_triggerq = _PriorityQueue() # holds tuples of (datetime, priority) and (name, obj, by, source, dest, value)
# to be put in the run queue when time is due
_pluginname_prefix = 'plugins.' # prefix for scheduler names
def __init__(self, smarthome):
threading.Thread.__init__(self, name='Scheduler')
logger.info('Init Scheduler')
self._sh = smarthome
self._lock = threading.Lock()
self._runc = threading.Condition()
global _scheduler_instance
if _scheduler_instance is not None:
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 4)
logger.critical("A second 'scheduler' object has been created. There should only be ONE instance of class 'Scheduler'!!! Called from: {} ({})".format(calframe[1][1], calframe[1][3]))
_scheduler_instance = self
self.shtime = Shtime.get_instance()
self.items = Items.get_instance()
self.mqtt = None
# --------------------------------------------------------------------------------------------------
# Following (static) method of the class Scheduler implement the API for schedulers in SmartHomeNG
# --------------------------------------------------------------------------------------------------
@staticmethod
def get_instance():
"""
Returns the instance of the scheduler class, to be used to access the scheduler-api
Use it the following way to access the api:
.. code-block:: python
from lib.scheduler import Scheduler
scheduler = Scheduler.get_instance()
# to access a method (eg. to trigger a logic):
scheduler.trigger(...)
:return: scheduler instance
:rtype: object or None
"""
if _scheduler_instance is None:
return None
else:
return _scheduler_instance
def get_worker_count(self):
"""
Get number of worker threads initialized by scheduler
:return: number of worker threads
"""
return len(self._workers)
def get_idle_worker_count(self):
"""
Get number of idle worker threads
:return: number of worker threads
"""
idle_count = 0
for w in self._workers:
if w.name == 'idle':
idle_count += 1
return idle_count
def get_worker_names(self):
"""
Get names on non-idle worker threads
:return: list with names of worker threads
"""
worker_names = []
for w in self._workers:
if w.name != 'idle':
worker_names.append(w.name)
return worker_names
def run(self):
self.alive = True
logger.debug("creating {0} workers".format(self._worker_num))
for i in range(self._worker_num):
self._add_worker()
while self.alive:
now = self.shtime.now()
if self._runq.qsize() > len(self._workers):
delta = now - self._last_worker
if delta.seconds > self._worker_delta:
if len(self._workers) < self._worker_max:
self._add_worker()
else:
logger.error("Needing more worker threads than the specified maximum of {}! ({} worker threads active)".format(self._worker_max, len(self._workers)))
tn = {}
# for t in threading.enumerate():
for t in self._workers:
tn[t.name] = tn.get(t.name, 0) + 1
logger.info('Worker-Threads: ' + ', '.join("{0}: {1}".format(k, v) for (k, v) in list(tn.items())))
if int(self._sh._restart_on_num_workers) < self._worker_max:
# do no restart
self._add_worker()
else:
if len(self._workers) < int(self._sh._restart_on_num_workers):
self._add_worker()
else:
logger.warning('Worker-Threads: ' + ', '.join("{0}: {1}".format(k, v) for (k, v) in list(tn.items())))
self._sh.restart('SmartHomeNG (scheduler started too many worker threads ({}))'.format(len(self._workers)))
while self._triggerq.qsize() > 0:
try:
(dt, prio), (name, obj, by, source, dest, value) = self._triggerq.get()
except Exception as e:
logger.warning("Trigger queue exception: {0}".format(e))
break
if dt < now: # run it
self._runc.acquire()
self._runq.insert(prio, (name, obj, by, source, dest, value))
self._runc.notify()
self._runc.release()
else: # put last entry back and break while loop
self._triggerq.insert((dt, prio), (name, obj, by, source, dest, value))
break
# For debugging
# task_count = 0
# for name in self._scheduler:
# task = self._scheduler[name]
# if task['next'] is not None:
# task_count += 1
# End for debugging
if not self._lock.acquire(timeout=1):
# logger.critical("Scheduler: Deadlock! - Task Count to enter run queue: {}".format(task_count))
logger.critical("Scheduler: Deadlock!")
continue
try:
for name in self._scheduler:
task = self._scheduler[name]
if task['next'] is not None:
if task['next'] < now:
self._runc.acquire()
self._runq.insert(task['prio'], (name, task['obj'], 'Scheduler', None, None, task['value']))
self._runc.notify()
self._runc.release()
task['next'] = None
else:
continue
elif not task['active']:
continue
else:
if task['cron'] is None and task['cycle'] is None:
continue
else:
self._next_time(name)
finally:
self._lock.release()
time.sleep(0.5)
def stop(self):
self.alive = False
def trigger(self, name, obj=None, by='Logic', source=None, value=None, dest=None, prio=3, dt=None, from_smartplugin=False):
"""
triggers the execution of a logic optional at a certain datetime given with dt
:param name:
:param obj:
:param by:
:param source:
:param value:
:param dest:
:param prio:
:param dt: a certain datetime
:return: always None
"""
name = self.check_caller(name, from_smartplugin)
if obj is None:
if name in self._scheduler:
obj = self._scheduler[name]['obj']
else:
logger.warning("Logic name not found: {0}".format(name))
return
if name in self._scheduler:
if not self._scheduler[name]['active']:
logger.debug("Logic '{0}' deactivated. Ignoring trigger from {1} {2}".format(name, by, source))
return
if dt is None:
logger.debug("Triggering {0} - by: {1} source: {2} dest: {3} value: {4}".format(name, by, source, dest, str(value)[:40]))
self._runc.acquire()
self._runq.insert(prio, (name, obj, by, source, dest, value))
self._runc.notify()
self._runc.release()
else:
if not isinstance(dt, datetime.datetime):
logger.warning("Trigger: Not a valid timezone aware datetime for {0}. Ignoring.".format(name))
return
if dt.tzinfo is None:
logger.warning("Trigger: Not a valid timezone aware datetime for {0}. Ignoring.".format(name))
return
logger.debug("Triggering {0} - by: {1} source: {2} dest: {3} value: {4} at: {5}".format(name, by, source, dest, str(value)[:40], dt))
self._triggerq.insert((dt, prio), (name, obj, by, source, dest, value))
def remove(self, name, from_smartplugin=False):
"""
Remove a scheduler entry with given name. If a call is made from a SmartPlugin with an instance configuration
the instance name is added to the name to be able to distinguish scheduler entries from different instances
:param name: scheduler entry name to remove
:param from_smartplugin:
"""
self._lock.acquire()
try:
name = self.check_caller(name, from_smartplugin)
logger.debug("remove scheduler entry with name:{0}".format(name))
if name in self._scheduler:
del(self._scheduler[name])
except:
logger.error("Could not remove scheduler entry for {}".format(name))
finally:
self._lock.release()
def check_caller(self, name, from_smartplugin=False):
"""
Checks the calling stack if the calling function (one of get, change, remove, trigger) itself was called by
a smartplugin instance. If there is an instance name of the calling smartplugin then the instance name of that
calling smartplugin is appended to the name
:param name: the name of a scheduler entry
:param from_smartplugin:
:return: returns either the name or name combined with instance name
"""
stack = inspect.stack()
try:
obj = stack[2][0].f_locals["self"]
if isinstance(obj, SmartPlugin):
iname = obj.get_instance_name()
if iname != '':
# if not (iname).startswith(self._pluginname_prefix):
if not from_smartplugin:
if not str(name).endswith('_' + iname):
name = name + '_' + obj.get_instance_name()
except:
pass
return name
def return_next(self, name, from_smartplugin=False):
# name = self.check_caller(name, from_smartplugin) # ms
if name in self._scheduler:
return self._scheduler[name]['next']
def add(self, name, obj, prio=3, cron=None, cycle=None, value=None, offset=None, next=None, from_smartplugin=False):
"""
Adds an entry to the scheduler.
:param name: Name of the scheduler
:param obj: Method to call by the scheduler
:param prio: a priority with default of 3 having 1 as most important and higher numbers less important
:param cron: a crontab entry of type string or a list of entries
:param cycle: a time given as integer in seconds or a string with a time given in seconds and a value after an equal sign
:param value: Value that an item should be set to or to be handed to a logic, otherwise: None
:param offset: an optional offset for cycle. If not given, cycle start point will be varied between 10..15 seconds to prevent too many scheduler entries with the same starting times
:param next:
:param from_smartplugin: Only to set to True, if called from the internal method in SmartPlugin class
"""
# set shtime and items if they were initialized to None in __init__ (potential timing problem in init of shng)
if self.shtime is None:
self.shtime = Shtime.get_instance()
if self.items is None:
self.items = Items.get_instance()
self._lock.acquire()
try:
if isinstance(cron, str):
cron = [cron, ]
if isinstance(cron, list):
_cron = {}
for entry in cron:
desc, __, _value = entry.partition('=')
desc = desc.strip()
if _value == '':
_value = None
else:
_value = _value.strip()
if desc.lower().startswith('init'):
offset = 5 # default init offset
desc, op, seconds = desc.partition('+')
if op:
offset += int(seconds)
else:
desc, op, seconds = desc.partition('-')
if op:
offset -= int(seconds)
value = _value
next = self.shtime.now() + datetime.timedelta(seconds=offset)
else:
_cron[desc] = _value
if _cron == {}:
cron = None
else:
cron = _cron
if isinstance(cycle, int):
cycle = {cycle: None}
elif isinstance(cycle, str):
cycle, __, _value = cycle.partition('=')
try:
cycle = int(cycle.strip())
except Exception:
logger.warning("Scheduler: Invalid cycle entry for {0} {1}".format(name, cycle))
return
if _value != '':
_value = _value.strip()
else:
_value = None
cycle = {cycle: _value}
if cycle is not None and offset is None: # spread cycle jobs
offset = random.randint(10, 15)
# change name for multi instance plugins
if obj.__class__.__name__ == 'method':
if isinstance(obj.__self__, SmartPlugin):
if obj.__self__.get_instance_name() != '':
#if not (name).startswith(self._pluginname_prefix):
if not from_smartplugin:
name = name +'_'+ obj.__self__.get_instance_name()
logger.debug("Scheduler: Name changed by adding plugin instance name to: " + name)
self._scheduler[name] = {'prio': prio, 'obj': obj, 'cron': cron, 'cycle': cycle, 'value': value, 'next': next, 'active': True}
if next is None:
self._next_time(name, offset)
finally:
self._lock.release()
def get(self, name, from_smartplugin=False):
"""
takes a given name for a scheduler and returns either the matching scheduler or None
"""
name = self.check_caller(name, from_smartplugin)
if name in self._scheduler:
return self._scheduler[name]
else:
return None
def change(self, name, from_smartplugin=False, **kwargs):
name = self.check_caller(name, from_smartplugin)
if name in self._scheduler:
for key in kwargs:
if key in self._scheduler[name]:
if key == 'cron':
if isinstance(kwargs[key], str):
_cron = {}
for entry in kwargs[key].split('|'):
desc, __, _value = entry.partition('=')
desc = desc.strip()
if _value == '':
_value = None
else:
_value = _value.strip()
_cron[desc] = _value
if _cron == {}:
kwargs[key] = None
else:
kwargs[key] = _cron
elif key == 'cycle':
_cycle = kwargs[key]
if isinstance(kwargs[key], dict):
_cycle = kwargs[key]
elif isinstance(kwargs[key], int):
_cycle = {kwargs[key]: None}
elif isinstance(kwargs[key], str):
_param = kwargs[key].strip()
if _param[0] == '{' and _param[-1] == '}':
_param = _param[1:-1]
_cycle, __, _value = _param.partition(':')
try:
_cycle = int(_cycle.strip())
except Exception:
logger.warning("scheduler.change: Invalid cycle entry for {} {}".format(name, _cycle))
return
if _value != '':
_value = _value.strip()
else:
_value = None
_cycle = {_cycle: _value}
#logger.warning("scheduler.change: {}: {}, type = type(kwargs[key])={}".format(name, kwargs[key], type(kwargs[key])))
kwargs[key] = _cycle
#logger.warning("scheduler.change: {}: cycle entry {}".format(name, _cycle))
elif key == 'active':
if kwargs['active'] and not self._scheduler[name]['active']:
logger.info("Activating logic: {0}".format(name))
elif not kwargs['active'] and self._scheduler[name]['active']:
logger.info("Deactivating logic: {0}".format(name))
self._scheduler[name][key] = kwargs[key]
else:
logger.warning("Attribute {0} for {1} not specified. Could not change it.".format(key, name))
if self._scheduler[name]['active'] is True:
if 'cycle' in kwargs or 'cron' in kwargs:
self._next_time(name)
else:
self._scheduler[name]['next'] = None
else:
logger.warning("Could not change {0}. No logic/method with this name found.".format(name))
def _next_time(self, name, offset=None):
"""
Looks at the cycle and crontab attributes of job with name to find the next time
for them and puts this and the value to the job.
:param name: the name of the job
:param offset: if a cycle attribute is present, then this value offsets the next execution time of a cycle
"""
job = self._scheduler[name]
if None == job['cron'] == job['cycle']:
self._scheduler[name]['next'] = None
return
next_time = None
value = None
now = self.shtime.now()
now = now.replace(microsecond=0)
if job['cycle'] is not None:
cycle = list(job['cycle'].keys())[0]
value = job['cycle'][cycle]
if offset is None:
offset = cycle
next_time = now + datetime.timedelta(seconds=offset)
if job['cron'] is not None:
for entry in job['cron']:
ct = self._crontab(entry)
if next_time is not None:
if ct < next_time:
next_time = ct
value = job['cron'][entry]
else:
next_time = ct
value = job['cron'][entry]
self._scheduler[name]['next'] = next_time
self._scheduler[name]['value'] = value
if name not in ['Connections', 'series', 'SQLite dump']:
logger.debug("{0} next time: {1}".format(name, next_time))
def __iter__(self):
for job in self._scheduler:
yield job
def _add_worker(self):
self._last_worker = self.shtime.now()
t = threading.Thread(target=self._worker)
t.start()
self._workers.append(t)
if len(self._workers) > self._worker_num:
logger.info("Adding worker thread. Total: {0}".format(len(self._workers)))
tn = {}
for t in threading.enumerate():
tn[t.name] = tn.get(t.name, 0) + 1
logger.info('Threads: ' + ', '.join("{0}: {1}".format(k, v) for (k, v) in list(tn.items())))
def _worker(self):
while self.alive:
self._runc.acquire()
self._runc.wait(timeout=1)
try:
prio, (name, obj, by, source, dest, value) = self._runq.get()
except IndexError:
continue
finally:
self._runc.release()
self._task(name, obj, by, source, dest, value)
def _task(self, name, obj, by, source, dest, value):
threading.current_thread().name = name
logger = logging.getLogger(name)
if obj.__class__.__name__ == 'Logic':
source_details = None
if isinstance(source, dict):
source_details = source.get('details', '')
source = source.get('item', '')
trigger = {'by': by, 'source': source, 'source_details': source_details, 'dest': dest, 'value': value} # noqa
#following variables are assigned to be available during logic execution
sh = self._sh # noqa
shtime = self.shtime
items = self.items
# set the logic environment here (for use within functions in logics):
logic = obj # noqa
logic.sh = sh
logic.logger = logger
logic.shtime = shtime
logic.items = items
logic.trigger_dict = trigger # logic.trigger has naming conflict with method logic.trigger of lib.item
logics = obj._logics
if not self.mqtt:
if _lib_modules_found:
self.mqtt = Modules.get_instance().get_module('mqtt')
mqtt = self.mqtt
logic.mqtt = mqtt
try:
if logic.enabled:
exec(obj.bytecode)
# store timestamp of last run
obj.set_last_run()
for method in logic.get_method_triggers():
try:
method(logic, by, source, dest)
except Exception as e:
logger.exception("Logic: Trigger {} for {} failed: {}".format(method, logic.name, e))
except SystemExit:
# ignore exit() call from logic.
pass
except Exception as e:
tb = sys.exc_info()[2]
tb = traceback.extract_tb(tb)[-1]
logger.exception("Logic: {0}, File: {1}, Line: {2}, Method: {3}, Exception: {4}".format(name, tb[0], tb[1], tb[2], e))
elif obj.__class__.__name__ == 'Item':
try:
if value is not None:
obj(value, caller="Scheduler")
except Exception as e:
logger.exception("Item {0} exception: {1}".format(name, e))
else: # method
try:
if value is None:
obj()
else:
obj(**value)
except Exception as e:
logger.exception("Method {0} exception: {1}".format(name, e))
threading.current_thread().name = 'idle'
def _crontab(self, crontab):
"""
inspects if a crontab entry contains a sunbound time instruction (e.g. "17:00<sunset<20:00") or
if it contains a normal crontab entry (e.g. "*/5 6-19/1 * * *")
:param crontab: a string containing an enhanced crontab entry that may include a sunset/sunrise
:return: a timezone aware datetime with the next event time or an error datetime object that lies 10 years in the future
"""
try:
# process sunrise/sunset
for entry in crontab.split('<'):
if entry.startswith('sun'):
return self._sun(crontab)
next_event = self._parse_month(crontab) # this month
if not next_event:
next_event = self._parse_month(crontab, next_month=True) # next month
return next_event
except Exception as e:
logger.error('Error parsing crontab "{}": {}'.format(crontab, e))
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
def _parse_month(self, crontab, next_month=False):
"""
Inspects a given string with classic crontab information to calculate the next point in time that matches
The function depends on the function now() of SmartHomeNG core
:param crontab: a string with crontab entries. It is expected to have the form of ``minute hour day weekday``
:param next_month: inspect the current month or the next following month
:return: false or datetime
"""
now = self.shtime.now()
try:
minute, hour, day, wday = crontab.strip().split()
except:
logger.warning("crontab entry '{}' can not be split up into 4 parts for minute, hour, day and weekday".format(crontab))
return False
# evaluate the crontab strings
minute_range = self._range(minute, 00, 59)
hour_range = self._range(hour, 00, 23)
if not next_month:
mdays = calendar.monthrange(now.year, now.month)[1]
elif now.month == 12:
mdays = calendar.monthrange(now.year + 1, 1)[1]
else:
mdays = calendar.monthrange(now.year, now.month + 1)[1]
if wday == '*' and day == '*':
day_range = self._day_range('0, 1, 2, 3, 4, 5, 6')
elif wday != '*' and day == '*':
day_range = self._range(wday,0,6)
day_range = self._day_range(','.join(day_range))
elif wday != '*' and day != '*':
day_range = self._range(wday,0,6)
day_range = self._day_range(','.join(day_range))
day_range = day_range + self._range(day, 0o1, mdays)
else:
day_range = self._range(day, 0o1, mdays)
# combine the different ranges
event_range = sorted([str(day) + '-' + str(hour) + '-' + str(minute) for minute in minute_range for hour in hour_range for day in day_range])
if next_month: # next month
next_event = event_range[0]
next_time = now + dateutil.relativedelta.relativedelta(months=+1)
else: # this month
now_str = now.strftime("%d-%H-%M")
next_event = self._next(lambda event: event > now_str, event_range)
if not next_event:
return False
next_time = now
day, hour, minute = next_event.split('-')
return next_time.replace(day=int(day), hour=int(hour), minute=int(minute), second=0, microsecond=0)
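# Illustrative example (hypothetical date): with now() on Friday 2017-03-10,
# self._parse_month('30 6 * * 0') yields Monday 2017-03-13 06:30 (weekday 0
# maps to Monday in _day_range below); an entry whose times have already
# passed returns False, so the caller retries with next_month=True.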
def _next(self, f, seq):
for item in seq:
if f(item):
return item
return False
def _sun(self, crontab):
"""
parses a given string with a time range to determine its time boundaries and
returns the next matching time
:param crontab: a string of the form '[H:M<](sunrise|sunset)[+|-][offset][<H:M]', e.g. '6:00<sunrise<8:00'
"""
# checking preconditions from configuration:
if not self._sh.sun: # no sun object created
logger.warning('No latitude/longitude specified. You could not use sunrise/sunset as crontab entry.')
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
# find min/max times
tabs = crontab.split('<')
if len(tabs) == 1:
smin = None
cron = tabs[0].strip()
smax = None
elif len(tabs) == 2:
if tabs[0].startswith('sun'):
smin = None
cron = tabs[0].strip()
smax = tabs[1].strip()
else:
smin = tabs[0].strip()
cron = tabs[1].strip()
smax = None
elif len(tabs) == 3:
smin = tabs[0].strip()
cron = tabs[1].strip()
smax = tabs[2].strip()
else:
logger.error('Wrong syntax: {0}. Should be [H:M<](sunrise|sunset)[+|-][offset][<H:M]'.format(crontab))
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
doff = 0 # degree offset
moff = 0 # minute offset
tmp, op, offs = cron.rpartition('+')
if op:
if offs.endswith('m'):
moff = int(offs.strip('m'))
else:
doff = float(offs)
else:
tmp, op, offs = cron.rpartition('-')
if op:
if offs.endswith('m'):
moff = -int(offs.strip('m'))
else:
doff = -float(offs)
if cron.startswith('sunrise'):
next_time = self._sh.sun.rise(doff, moff)
# time in next_time will be in utctime. So we need to adjust it
if next_time.tzinfo == tzutc():
next_time = next_time.astimezone(self.shtime.tzinfo())
else:
logger.warning("next_time.tzinfo was not given as utc!")
elif cron.startswith('sunset'):
next_time = self._sh.sun.set(doff, moff)
# time in next_time will be in utctime. So we need to adjust it
if next_time.tzinfo == tzutc():
next_time = next_time.astimezone(self.shtime.tzinfo())
else:
logger.warning("next_time.tzinfo was not given as utc!")
else:
logger.error('Wrong syntax: {0}. Should be [H:M<](sunrise|sunset)[+|-][offset][<H:M]'.format(crontab))
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
now = self.shtime.now()
if smin is not None:
h, sep, m = smin.partition(':')
try:
dmin = next_time.replace(hour=int(h), minute=int(m), second=0, tzinfo=self.shtime.tzinfo())
except Exception:
logger.error('Wrong syntax: {0}. Should be [H:M<](sunrise|sunset)[+|-][offset][<H:M]'.format(crontab))
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
if dmin > next_time:
next_time = dmin
if smax is not None:
h, sep, m = smax.partition(':')
try:
dmax = next_time.replace(hour=int(h), minute=int(m), second=0, tzinfo=self.shtime.tzinfo())
except Exception:
logger.error('Wrong syntax: {0}. Should be [H:M<](sunrise|sunset)[+|-][offset][<H:M]'.format(crontab))
return datetime.datetime.now(tzutc()) + dateutil.relativedelta.relativedelta(years=+10)
if dmax < next_time:
if dmax < now:
dmax = dmax + datetime.timedelta(days=1)
next_time = dmax
return next_time
def _range(self, entry, low, high):
"""
inspects a single crontab entry for minutes or hours
:param entry: a string with single entries of intervals, numeric ranges or single values
:param low: lower limit as integer
:param high: higher limit as integer
:return: a list of zero-padded two-digit strings within the given limits
"""
result = []
item_range = []
# Check for multiple comma separated values and process each of them recursively
if ',' in entry:
for item in entry.split(','):
result.extend(self._range(item, low, high))
# Check for intervals, e.g. "*/2", "9-17/2"
elif '/' in entry:
spec_range, interval = entry.split('/')
logger.debug('Cron spec interval {} {}'.format(entry, interval))
result = self._range(spec_range, low, high)[::int(interval)]
# Check for numeric ranges, e.g. "9-17"
elif '-' in entry:
spec_low, spec_high = entry.split('-')
result = self._range('*', int(spec_low), int(spec_high))
# Process single value
else:
if entry == '*':
item_range = list(range(low, high + 1))
else:
item = int(entry)
if item > high: # entry above range
item = high # truncate value to highest possible
item_range.append(item)
for entry in item_range:
result.append('{:02d}'.format(entry))
return result
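# Illustrative examples of the parsing rules above:
#   self._range('*/15', 0, 59)  -> ['00', '15', '30', '45']
#   self._range('9-17', 0, 23)  -> ['09', '10', ..., '17']
#   self._range('5,10', 0, 59)  -> ['05', '10']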
def _day_range(self, days):
"""
inspects a given string with weekdays given as integer numbers (0=Monday .. 6=Sunday) separated by ","
:param days: comma-separated weekday numbers as a string
:return: a list of strings containing the matching days of the month
"""
now = datetime.date.today()
wdays = [MO, TU, WE, TH, FR, SA, SU]
result = []
for day in days.split(','):
wday = wdays[int(day)]
# add next weekday occurrence
day = now + dateutil.relativedelta.relativedelta(weekday=wday)
result.append(day.strftime("%d"))
# safety add-on if weekday equals todays weekday
day = now + dateutil.relativedelta.relativedelta(weekday=wday(+2))
result.append(day.strftime("%d"))
return result
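# Illustrative example (hypothetical date): if today is Monday the 6th,
# self._day_range('0') returns ['06', '13'], i.e. the next occurrence of
# weekday 0 (Monday) plus the safety entry one week later, used when the
# cron day falls in the following week.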
| gpl-3.0 |
jdreaver/vispy | examples/demo/gloo/imshow.py | 18 | 3866 | # -*- coding: utf-8 -*-
# vispy: gallery 10
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Show an image using gloo.
"""
import numpy as np
from vispy import app
from vispy.gloo import clear, set_clear_color, set_viewport, Program
# Image
def func(x, y):
return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
x = np.linspace(-3.0, 3.0, 512).astype(np.float32)
y = np.linspace(-3.0, 3.0, 512).astype(np.float32)
X, Y = np.meshgrid(x, y)
I = func(X, Y)
# Image normalization
vmin, vmax = I.min(), I.max()
I = (I-vmin)/(vmax-vmin)
# Colormaps
colormaps = np.ones((16, 512, 4)).astype(np.float32)
values = np.linspace(0, 1, 512)[1:-1]
# Hot colormap
colormaps[0, 0] = 0, 0, 1, 1 # Low values (< vmin)
colormaps[0, -1] = 0, 1, 0, 1 # High values (> vmax)
colormaps[0, 1:-1, 0] = np.interp(values, [0.00, 0.33, 0.66, 1.00],
[0.00, 1.00, 1.00, 1.00])
colormaps[0, 1:-1, 1] = np.interp(values, [0.00, 0.33, 0.66, 1.00],
[0.00, 0.00, 1.00, 1.00])
colormaps[0, 1:-1, 2] = np.interp(values, [0.00, 0.33, 0.66, 1.00],
[0.00, 0.00, 0.00, 1.00])
# Grey colormap
colormaps[1, 0] = 0, 0, 1, 1 # Low values (< vmin)
colormaps[1, -1] = 0, 1, 0, 1 # High values (> vmax)
colormaps[1, 1:-1, 0] = np.interp(values, [0.00, 1.00],
[0.00, 1.00])
colormaps[1, 1:-1, 1] = np.interp(values, [0.00, 1.00],
[0.00, 1.00])
colormaps[1, 1:-1, 2] = np.interp(values, [0.00, 1.00],
[0.00, 1.00])
# Jet colormap
# ...
img_vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = vec4(position, 0.0, 1.0 );
v_texcoord = texcoord;
}
"""
img_fragment = """
uniform float vmin;
uniform float vmax;
uniform float cmap;
uniform sampler2D image;
uniform sampler2D colormaps;
uniform vec2 colormaps_shape;
varying vec2 v_texcoord;
void main()
{
float value = texture2D(image, v_texcoord).r;
float index = (cmap+0.5) / colormaps_shape.y;
if( value < vmin ) {
gl_FragColor = texture2D(colormaps, vec2(0.0,index));
} else if( value > vmax ) {
gl_FragColor = texture2D(colormaps, vec2(1.0,index));
} else {
value = (value-vmin)/(vmax-vmin);
value = 1.0/512.0 + 510.0/512.0*value;
gl_FragColor = texture2D(colormaps, vec2(value,index));
}
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, size=(512, 512),
keys='interactive')
self.image = Program(img_vertex, img_fragment, 4)
self.image['position'] = (-1, -1), (-1, +1), (+1, -1), (+1, +1)
self.image['texcoord'] = (0, 0), (0, +1), (+1, 0), (+1, +1)
self.image['vmin'] = +0.1
self.image['vmax'] = +0.9
self.image['cmap'] = 0 # Colormap index to use
self.image['colormaps'] = colormaps
self.image['colormaps'].interpolation = 'linear'
self.image['colormaps_shape'] = colormaps.shape[1], colormaps.shape[0]
self.image['image'] = I.astype('float32')
self.image['image'].interpolation = 'linear'
set_clear_color('black')
self.show()
def on_resize(self, event):
width, height = event.physical_size
set_viewport(0, 0, *event.physical_size)
def on_draw(self, event):
clear(color=True, depth=True)
self.image.draw('triangle_strip')
if __name__ == '__main__':
canvas = Canvas()
app.run()
| bsd-3-clause |
chme/plugin.audio.mpdclient2 | mpdclient/Env.py | 1 | 1666 | #
# Copyright (c) chme
#
# This file is part of the mpdclient kodi plugin
#
# This plugin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of
# the License, or (at your option) any later version.
#
# This plugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import sys
import urlparse
import xbmcaddon
class Env:
def __init__(self):
self.__addon = xbmcaddon.Addon("plugin.audio.mpdclient2")
self.__addon_handle = int(sys.argv[1])
self.__base_url = sys.argv[0]
self.__addon_args = urlparse.parse_qs(sys.argv[2][1:])
print sys.argv
def base_url(self):
return self.__base_url
def addon_handle(self):
return self.__addon_handle
def param_string(self, name, default=""):
param = self.__addon_args.get(name, None)
if param is None:
return default
return param[0]
def param_list(self, name, default=[]):
param = self.__addon_args.get(name, default)
print param
return param
def localized(self, stringid):
return self.__addon.getLocalizedString(stringid)
def setting(self, name):
return self.__addon.getSetting(name)
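# A minimal usage sketch (illustrative; inside a Kodi plugin invocation
# sys.argv is [base_url, addon_handle, query_string], e.g.
# ['plugin://plugin.audio.mpdclient2/', '1', '?mode=artists']):
#
#   env = Env()
#   mode = env.param_string('mode', default='root')
#   handle = env.addon_handle()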
| gpl-3.0 |
svenstaro/ansible | contrib/inventory/vmware_inventory.py | 7 | 25984 | #!/usr/bin/env python
# Requirements
# - pyvmomi >= 6.0.0.2016.4
# TODO:
# * more jq examples
# * optional folder hierarchy
"""
$ jq '._meta.hostvars[].config' data.json | head
{
"alternateguestname": "",
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
"memoryhotaddenabled": false,
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
"changeversion": "2016-05-16T18:43:14.977925Z",
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
"cpuhotremoveenabled": false,
"vpmcenabled": false,
"firmware": "bios",
"""
from __future__ import print_function
import argparse
import atexit
import datetime
import getpass
import os
import re
import six
import ssl
import sys
import uuid
from collections import defaultdict
from six.moves import configparser
from time import time
from jinja2 import Environment
HAS_PYVMOMI = False
try:
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
HAS_PYVMOMI = True
except ImportError:
pass
try:
import json
except ImportError:
import simplejson as json
hasvcr = False
try:
import vcr
hasvcr = True
except ImportError:
pass
def regex_match(s, pattern):
'''Custom filter for regex matching'''
reg = re.compile(pattern)
if reg.match(s):
return True
else:
return False
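# Illustrative example: regex_match is registered below as a jinja2 filter,
# so ini-configured host filters can use expressions such as
#   {{ config.name | regex_match('^prod-') }}
# A direct call behaves like:
#   regex_match('prod-web01', '^prod-')  # -> True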
class VMwareMissingHostException(Exception):
pass
class VMWareInventory(object):
__name__ = 'VMWareInventory'
guest_props = False
instances = []
debug = False
load_dumpfile = None
write_dumpfile = None
maxlevel = 1
lowerkeys = True
config = None
cache_max_age = None
cache_path_cache = None
cache_path_index = None
cache_dir = None
server = None
port = None
username = None
password = None
validate_certs = True
host_filters = []
skip_keys = []
groupby_patterns = []
if sys.version_info > (3, 0):
safe_types = [int, bool, str, float, None]
else:
safe_types = [int, long, bool, str, float, None]
iter_types = [dict, list]
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
vimTableMaxDepth = {
"vim.HostSystem": 2,
"vim.VirtualMachine": 2,
}
custom_fields = {}
# use jinja environments to allow for custom filters
env = Environment()
env.filters['regex_match'] = regex_match
# translation table for attributes to fetch for known vim types
if not HAS_PYVMOMI:
vimTable = {}
else:
vimTable = {
vim.Datastore: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'],
vim.HostSystem: ['_moId', 'name'],
}
@staticmethod
def _empty_inventory():
return {"_meta": {"hostvars": {}}}
def __init__(self, load=True):
self.inventory = VMWareInventory._empty_inventory()
if load:
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Check the cache
cache_valid = self.is_cache_valid()
# Handle Cache
if self.args.refresh_cache or not cache_valid:
self.do_api_calls_update_cache()
else:
self.debugl('loading inventory from cache')
self.inventory = self.get_inventory_from_cache()
def debugl(self, text):
if self.args.debug:
try:
text = str(text)
except UnicodeEncodeError:
text = text.encode('ascii', 'ignore')
print('%s %s' % (datetime.datetime.now(), text))
def show(self):
# Data to print
self.debugl('dumping results')
data_to_print = None
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.inventory
return json.dumps(data_to_print, indent=2)
def is_cache_valid(self):
''' Determine whether the cache file has expired or is still valid '''
valid = False
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
valid = True
return valid
def do_api_calls_update_cache(self):
''' Get instances and cache the data '''
self.inventory = self.instances_to_inventory(self.get_instances())
self.write_to_cache(self.inventory)
def write_to_cache(self, data):
''' Dump inventory to json file '''
with open(self.cache_path_cache, 'wb') as f:
f.write(json.dumps(data))
def get_inventory_from_cache(self):
''' Read in jsonified inventory '''
jdata = None
with open(self.cache_path_cache, 'rb') as f:
jdata = f.read()
return json.loads(jdata)
def read_settings(self):
''' Reads the settings from the vmware_inventory.ini file '''
scriptbasename = __file__
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {'vmware': {
'server': '',
'port': 443,
'username': '',
'password': '',
'validate_certs': True,
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
'cache_name': 'ansible-vmware',
'cache_path': '~/.ansible/tmp',
'cache_max_age': 3600,
'max_object_level': 1,
'skip_keys': 'declaredalarmstate,'
'disabledmethod,'
'dynamicproperty,'
'dynamictype,'
'environmentbrowser,'
'managedby,'
'parent,'
'childtype,'
'resourceconfig',
'alias_pattern': '{{ config.name + "_" + config.uuid }}',
'host_pattern': '{{ guest.ipaddress }}',
'host_filters': '{{ guest.gueststate == "running" }}',
'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
'lower_var_keys': True,
'custom_field_group_prefix': 'vmware_tag_',
'groupby_custom_field': False}
}
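# A minimal vmware_inventory.ini sketch overriding a few of the defaults
# above (the server name and credentials are placeholders, not real values):
#
#   [vmware]
#   server = vcenter.example.com
#   username = inventory-user
#   password = secret
#   validate_certs = False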
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
# where is the config?
vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
config.read(vmware_ini_path)
# apply defaults
for k, v in defaults['vmware'].items():
if not config.has_option('vmware', k):
config.set('vmware', k, str(v))
# where is the cache?
self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
if self.cache_dir and not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
# set the cache filename and max age
cache_name = config.get('vmware', 'cache_name')
self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
self.debugl('cache path is %s' % self.cache_path_cache)
self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))
# mark the connection info
self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
self.debugl('server is %s' % self.server)
self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
self.debugl('username is %s' % self.username)
self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password'))
self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
if self.validate_certs in ['no', 'false', 'False', False]:
self.validate_certs = False
self.debugl('cert validation is %s' % self.validate_certs)
# behavior control
self.maxlevel = int(config.get('vmware', 'max_object_level'))
self.debugl('max object level is %s' % self.maxlevel)
self.lowerkeys = config.get('vmware', 'lower_var_keys')
if not isinstance(self.lowerkeys, bool):
if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
self.lowerkeys = True
else:
self.lowerkeys = False
self.debugl('lower keys is %s' % self.lowerkeys)
self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
self.debugl('skip keys is %s' % self.skip_keys)
self.host_filters = list(config.get('vmware', 'host_filters').split(','))
self.debugl('host filters are %s' % self.host_filters)
self.groupby_patterns = list(config.get('vmware', 'groupby_patterns').split(','))
self.debugl('groupby patterns are %s' % self.groupby_patterns)
# Special feature to disable the brute force serialization of the
# virtualmachine objects. The key name for these properties does not
# matter because the values are just items for a larger list.
if config.has_section('properties'):
self.guest_props = []
for prop in config.items('properties'):
self.guest_props.append(prop[1])
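# A hypothetical [properties] section illustrating the feature described
# above; only the option values (the right-hand sides) are used:
#
#   [properties]
#   prop01 = name
#   prop02 = config.uuid
#   prop03 = guest.ipAddress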
# save the config
self.config = config
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
parser.add_argument('--debug', action='store_true', default=False,
help='show debug info')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
parser.add_argument('--max-instances', default=None, type=int,
help='maximum number of instances to retrieve')
self.args = parser.parse_args()
def get_instances(self):
''' Get a list of vm instances with pyvmomi '''
kwargs = {'host': self.server,
'user': self.username,
'pwd': self.password,
'port': int(self.port)}
if hasattr(ssl, 'SSLContext') and not self.validate_certs:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
kwargs['sslContext'] = context
return self._get_instances(kwargs)
def _get_instances(self, inkwargs):
''' Make API calls '''
instances = []
si = SmartConnect(**inkwargs)
self.debugl('retrieving all instances')
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
# Create a search container for virtualmachines
self.debugl('creating containerview for virtualmachines')
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= self.args.max_instances:
break
instances.append(child)
self.debugl("%s total instances in container view" % len(instances))
if self.args.host:
instances = [x for x in instances if x.name == self.args.host]
instance_tuples = []
for instance in sorted(instances):
if self.guest_props:
ifacts = self.facts_from_proplist(instance)
else:
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
self.debugl('facts collected for all instances')
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl('%d custom fields collected' % len(self.custom_fields))
return instance_tuples
def instances_to_inventory(self, instances):
''' Convert a list of vm objects into a json compliant inventory '''
self.debugl('re-indexing instances based on ini settings')
inventory = VMWareInventory._empty_inventory()
inventory['all'] = {}
inventory['all']['hosts'] = []
for idx, instance in enumerate(instances):
# make a unique id for this object to avoid VMware's
# numerous UUIDs, which aren't all unique.
thisid = str(uuid.uuid4())
idata = instance[1]
# Put it in the inventory
inventory['all']['hosts'].append(thisid)
inventory['_meta']['hostvars'][thisid] = idata.copy()
inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid
# Make a map of the uuid to the alias the user wants
name_mapping = self.create_template_mapping(
inventory,
self.config.get('vmware', 'alias_pattern')
)
# Make a map of the uuid to the ssh hostname the user wants
host_mapping = self.create_template_mapping(
inventory,
self.config.get('vmware', 'host_pattern')
)
# Reset the inventory keys
for k, v in name_mapping.items():
if not host_mapping or k not in host_mapping:
continue
# set ansible_host (2.x)
try:
inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
# 1.9.x backwards compatibility
inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
except Exception:
continue
if k == v:
continue
# add new key
inventory['all']['hosts'].append(v)
inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]
# cleanup old key
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)
self.debugl('pre-filtered hosts:')
for i in inventory['all']['hosts']:
self.debugl(' * %s' % i)
# Apply host filters
for hf in self.host_filters:
if not hf:
continue
self.debugl('filter: %s' % hf)
filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
for k, v in filter_map.items():
if not v:
# delete this host
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)
self.debugl('post-filter hosts:')
for i in inventory['all']['hosts']:
self.debugl(' * %s' % i)
# Create groups
for gbp in self.groupby_patterns:
groupby_map = self.create_template_mapping(inventory, gbp)
for k, v in groupby_map.items():
if v not in inventory:
inventory[v] = {}
inventory[v]['hosts'] = []
if k not in inventory[v]['hosts']:
inventory[v]['hosts'].append(k)
if self.config.get('vmware', 'groupby_custom_field'):
for k, v in inventory['_meta']['hostvars'].items():
if 'customvalue' in v:
for tv in v['customvalue']:
if not isinstance(tv['value'], six.string_types):  # py2/py3-safe string check
continue
newkey = None
field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
values = []
keylist = map(lambda x: x.strip(), tv['value'].split(','))
for kl in keylist:
try:
newkey = self.config.get('vmware', 'custom_field_group_prefix') + field_name + '_' + kl
newkey = newkey.strip()
except Exception as e:
self.debugl(e)
values.append(newkey)
for tag in values:
if not tag:
continue
if tag not in inventory:
inventory[tag] = {}
inventory[tag]['hosts'] = []
if k not in inventory[tag]['hosts']:
inventory[tag]['hosts'].append(k)
return inventory
def create_template_mapping(self, inventory, pattern, dtype='string'):
''' Return a hash of uuid to templated string from pattern '''
mapping = {}
for k, v in inventory['_meta']['hostvars'].items():
t = self.env.from_string(pattern)
newkey = None
try:
newkey = t.render(v)
newkey = newkey.strip()
except Exception as e:
self.debugl(e)
if not newkey:
continue
elif dtype == 'integer':
newkey = int(newkey)
elif dtype == 'boolean':
if newkey.lower() == 'false':
newkey = False
elif newkey.lower() == 'true':
newkey = True
elif dtype == 'string':
pass
mapping[k] = newkey
return mapping
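# Worked example (hypothetical hostvars): with the default alias_pattern
# '{{ config.name + "_" + config.uuid }}', a host whose config.name is
# 'vm01' and config.uuid is 'abc' maps to the alias 'vm01_abc'.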
def facts_from_proplist(self, vm):
'''Get specific properties instead of serializing everything'''
rdata = {}
for prop in self.guest_props:
self.debugl('getting %s property for %s' % (prop, vm.name))
key = prop
if self.lowerkeys:
key = key.lower()
if '.' not in prop:
# props without periods are direct attributes of the parent
rdata[key] = getattr(vm, prop)
else:
# props with periods are subkeys of parent attributes
parts = prop.split('.')
total = len(parts) - 1
# pointer to the current object
val = None
# pointer to the current result key
lastref = rdata
for idx, x in enumerate(parts):
# if the val wasn't set yet, get it from the parent
if not val:
val = getattr(vm, x)
else:
# in a subkey, get the subprop from the previous attrib
try:
val = getattr(val, x)
except AttributeError as e:
self.debugl(e)
# lowercase keys if requested
if self.lowerkeys:
x = x.lower()
# change the pointer or set the final value
if idx != total:
if x not in lastref:
lastref[x] = {}
lastref = lastref[x]
else:
lastref[x] = val
return rdata
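# Worked example (hypothetical values): with guest_props set to
# ['name', 'guest.ipAddress'] and lower_var_keys enabled, rdata comes out
# shaped like {'name': 'vm01', 'guest': {'ipaddress': '10.0.0.5'}}.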
def facts_from_vobj(self, vobj, level=0):
''' Traverse a VM object and return a json compliant data structure '''
# pyvmomi objects are not yet serializable, but may be one day ...
# https://github.com/vmware/pyvmomi/issues/21
# WARNING:
# Accessing an object attribute will trigger a SOAP call to the remote.
# Increasing the attributes collected or the depth of recursion greatly
# increases runtime duration and potentially memory+network utilization.
if level == 0:
try:
self.debugl("get facts for %s" % vobj.name)
except Exception as e:
self.debugl(e)
rdata = {}
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if x not in self.bad_types]
methods = [x for x in methods if x.lower() not in self.skip_keys]
methods = sorted(methods)
for method in methods:
# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue
# Skip callable methods
if callable(methodToCall):
continue
if self.lowerkeys:
method = method.lower()
rdata[method] = self._process_object_types(
methodToCall,
thisvm=vobj,
inkey=method,
)
return rdata
def _process_object_types(self, vobj, thisvm=None, inkey=None, level=0):
''' Serialize an object '''
rdata = {}
if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
return rdata
if vobj is None:
rdata = None
elif type(vobj) in self.vimTable:
rdata = {}
for key in self.vimTable[type(vobj)]:
try:
rdata[key] = getattr(vobj, key)
except Exception as e:
self.debugl(e)
elif issubclass(type(vobj), str) or isinstance(vobj, str):
if vobj.isalnum():
rdata = vobj
else:
rdata = vobj.decode('ascii', 'ignore')
elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
rdata = vobj
elif issubclass(type(vobj), int) or isinstance(vobj, int):
rdata = vobj
elif issubclass(type(vobj), float) or isinstance(vobj, float):
rdata = vobj
elif isinstance(vobj, six.integer_types):
# covers long on python 2; plain int is already handled above, and
# this branch no longer references the name long on python 3
rdata = vobj
elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
rdata = []
try:
vobj = sorted(vobj)
except Exception:
pass
for idv, vii in enumerate(vobj):
if level + 1 <= self.maxlevel:
vid = self._process_object_types(
vii,
thisvm=thisvm,
inkey=inkey + '[' + str(idv) + ']',
level=(level + 1)
)
if vid:
rdata.append(vid)
elif issubclass(type(vobj), dict):
pass
elif issubclass(type(vobj), object):
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if x not in self.bad_types]
methods = [x for x in methods if (inkey + '.' + x.lower()) not in self.skip_keys]
methods = sorted(methods)
for method in methods:
# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue
if callable(methodToCall):
continue
if self.lowerkeys:
method = method.lower()
if level + 1 <= self.maxlevel:
try:
rdata[method] = self._process_object_types(
methodToCall,
thisvm=thisvm,
inkey=inkey + '.' + method,
level=(level + 1)
)
except vim.fault.NoPermission:
self.debugl("Skipping method %s (NoPermission)" % method)
else:
pass
return rdata
def get_host_info(self, host):
''' Return hostvars for a single host '''
if host in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][host]
elif self.args.host and self.inventory['_meta']['hostvars']:
match = None
for k, v in self.inventory['_meta']['hostvars']:
if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
match = k
break
if match:
return self.inventory['_meta']['hostvars'][match]
else:
raise VMwareMissingHostException('%s not found' % host)
else:
raise VMwareMissingHostException('%s not found' % host)
if __name__ == "__main__":
# Run the script
print(VMWareInventory().show())
| gpl-3.0 |
rwl/muntjac | muntjac/ui/drag_and_drop_wrapper.py | 1 | 9479 | # @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
from muntjac.event.transferable_impl import TransferableImpl
from muntjac.event.dd.drag_source import IDragSource
from muntjac.event.dd.drop_target import IDropTarget
from muntjac.event.dd.target_details_impl import TargetDetailsImpl
from muntjac.ui.html5_file import Html5File
from muntjac.ui.custom_component import CustomComponent
from muntjac.terminal.gwt.client.mouse_event_details import MouseEventDetails
from muntjac.terminal.stream_variable import \
(IStreamVariable, IStreamingEndEvent, IStreamingErrorEvent,
IStreamingProgressEvent, IStreamingStartEvent)
from muntjac.terminal.gwt.client.ui.dd.horizontal_drop_location import \
HorizontalDropLocation
from muntjac.terminal.gwt.client.ui.dd.vertical_drop_location import \
VerticalDropLocation
class DragAndDropWrapper(CustomComponent, IDropTarget, IDragSource):
CLIENT_WIDGET = None #ClientWidget(VDragAndDropWrapper, LoadStyle.EAGER)
def __init__(self, root):
"""Wraps given component in a L{DragAndDropWrapper}.
@param root: the component to be wrapped
"""
super(DragAndDropWrapper, self).__init__(root)
self._receivers = dict()
self._dragStartMode = DragStartMode.NONE
self._dropHandler = None
def paintContent(self, target):
super(DragAndDropWrapper, self).paintContent(target)
target.addAttribute('dragStartMode',
DragStartMode.ordinal(self._dragStartMode))
if self.getDropHandler() is not None:
self.getDropHandler().getAcceptCriterion().paint(target)
if self._receivers is not None and len(self._receivers) > 0:
for idd, html5File in self._receivers.iteritems():
if html5File.getStreamVariable() is not None:
target.addVariable(self, 'rec-' + idd,
ProxyReceiver(html5File))
# these are cleaned from receivers once the upload
# has started
else:
# instructs the client side not to send the file
target.addVariable(self, 'rec-' + idd, None)
# forget the file from subsequent paints
del self._receivers[idd]
def getDropHandler(self):
return self._dropHandler
def setDropHandler(self, dropHandler):
self._dropHandler = dropHandler
self.requestRepaint()
def translateDropTargetDetails(self, clientVariables):
return WrapperTargetDetails(clientVariables, self)
def getTransferable(self, rawVariables):
return WrapperTransferable(self, rawVariables)
def setDragStartMode(self, dragStartMode):
self._dragStartMode = dragStartMode
self.requestRepaint()
def getDragStartMode(self):
return self._dragStartMode
class WrapperTransferable(TransferableImpl):
def __init__(self, sourceComponent, rawVariables):
super(WrapperTransferable, self).__init__(sourceComponent, rawVariables)
self._files = None
fc = rawVariables.get('filecount')
if fc is not None:
self._files = [None] * fc
for i in range(fc):
fd = Html5File(rawVariables.get('fn%d' % i), # name
rawVariables.get('fs%d' % i), # size
rawVariables.get('ft%d' % i)) # mime
idd = rawVariables.get('fi%d' % i)
self._files[i] = fd
self._sourceComponent._receivers[idd] = fd
self._sourceComponent.requestRepaint() # paint receivers
def getDraggedComponent(self):
"""The component in wrapper that is being dragged or null if the
transferrable is not a component (most likely an html5 drag).
"""
return self.getData('component')
def getMouseDownEvent(self):
"""@return: the mouse down event that started the drag and drop
operation
"""
return MouseEventDetails.deSerialize(self.getData('mouseDown'))
def getFiles(self):
return self._files
def getText(self):
data = self.getData('Text') # IE, html5
if data is None:
# check for "text/plain" (webkit)
data = self.getData('text/plain')
return data
def getHtml(self):
data = self.getData('Html') # IE, html5
if data is None:
# check for "text/plain" (webkit)
data = self.getData('text/html')
return data
class WrapperTargetDetails(TargetDetailsImpl):
def __init__(self, rawDropData, wrapper):
super(WrapperTargetDetails, self).__init__(rawDropData, wrapper)
def getAbsoluteLeft(self):
"""@return: the absolute position of wrapper on the page"""
return self.getData('absoluteLeft')
def getAbsoluteTop(self):
"""@return: the absolute position of wrapper on the page"""
return self.getData('absoluteTop')
def getMouseEvent(self):
"""@return: details about the actual event that caused the event
details. Practically mouse move or mouse up.
"""
return MouseEventDetails.deSerialize(self.getData('mouseEvent'))
def getVerticalDropLocation(self):
"""@return: a detail about the drags vertical position over the
wrapper.
"""
data = self.getData('verticalLocation')
return VerticalDropLocation.valueOf[data]
def getHorizontalDropLocation(self):
"""@return: a detail about the drags horizontal position over the
wrapper.
"""
data = self.getData('horizontalLocation')
return HorizontalDropLocation.valueOf[data]
def verticalDropLocation(self):
"""@deprecated: use L{getVerticalDropLocation} instead"""
return self.getVerticalDropLocation()
def horizontalDropLocation(self):
"""@deprecated: use L{getHorizontalDropLocation} instead"""
return self.getHorizontalDropLocation()
class DragStartMode(object):
#: L{DragAndDropWrapper} does not start drag events at all
NONE = 'NONE'
#: The component on which the drag started will be shown as drag image.
COMPONENT = 'COMPONENT'
#: The whole wrapper is used as a drag image when dragging.
WRAPPER = 'WRAPPER'
_values = [NONE, COMPONENT, WRAPPER]
@classmethod
def values(cls):
return cls._values[:]
@classmethod
def ordinal(cls, val):
return cls._values.index(val)
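# Usage sketch: DragStartMode.ordinal(DragStartMode.WRAPPER) returns 2,
# and values() hands back a copy, so callers cannot mutate _values.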
class ProxyReceiver(IStreamVariable):
def __init__(self, fd):
self._file = fd
self._listenProgressOfUploadedFile = None
def getOutputStream(self):
if self._file.getStreamVariable() is None:
return None
return self._file.getStreamVariable().getOutputStream()
def listenProgress(self):
return self._file.getStreamVariable().listenProgress()
def onProgress(self, event):
wrapper = ReceivingEventWrapper(event, self._file, self)
self._file.getStreamVariable().onProgress(wrapper)
def streamingStarted(self, event):
self._listenProgressOfUploadedFile = \
self._file.getStreamVariable() is not None
if self._listenProgressOfUploadedFile:
wrapper = ReceivingEventWrapper(event, self._file, self)
self._file.getStreamVariable().streamingStarted(wrapper)
# no need to tell the client about this receiver on next paint
self.receivers.remove(self._file)
# let the terminal GC the stream variable and not to accept
# other file uploads to this variable
event.disposeStreamVariable()
def streamingFinished(self, event):
if self._listenProgressOfUploadedFile:
wrapper = ReceivingEventWrapper(event, self._file, self)
self._file.getStreamVariable().streamingFinished(wrapper)
def streamingFailed(self, event):
if self._listenProgressOfUploadedFile:
wrapper = ReceivingEventWrapper(event, self._file, self)
self._file.getStreamVariable().streamingFailed(wrapper)
def isInterrupted(self):
return self._file.getStreamVariable().isInterrupted()
class ReceivingEventWrapper(IStreamingErrorEvent, IStreamingEndEvent,
IStreamingStartEvent, IStreamingProgressEvent):
# With XHR2 file posts we can't provide as much information from the
# terminal as with multipart request. This helper class wraps the
# terminal event and provides the lacking information from the
# Html5File.
def __init__(self, e, fd, receiver):
self._wrappedEvent = e
self._file = fd
self._receiver = receiver
def getMimeType(self):
return self._file.getType()
def getFileName(self):
return self._file.getFileName()
def getContentLength(self):
return self._file.getFileSize()
def getReceiver(self):
return self._receiver
def getException(self):
if isinstance(self._wrappedEvent, IStreamingErrorEvent):
return self._wrappedEvent.getException()
return None
def getBytesReceived(self):
return self._wrappedEvent.getBytesReceived()
def disposeStreamVariable(self):
"""Calling this method has no effect. DD files are receive only
once anyway.
"""
pass
| apache-2.0 |
axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/google/protobuf/internal/message_set_extensions_pb2.py | 42 | 8373 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/message_set_extensions.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/message_set_extensions.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MESSAGE_SET_EXTENSION3_FIELD_NUMBER = 98418655
message_set_extension3 = _descriptor.FieldDescriptor(
name='message_set_extension3', full_name='google.protobuf.internal.message_set_extension3', index=0,
number=98418655, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TESTMESSAGESET = _descriptor.Descriptor(
name='TestMessageSet',
full_name='google.protobuf.internal.TestMessageSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001')),
is_extendable=True,
syntax='proto2',
extension_ranges=[(4, 2147483647), ],
oneofs=[
],
serialized_start=83,
serialized_end=113,
)
_TESTMESSAGESETEXTENSION1 = _descriptor.Descriptor(
name='TestMessageSetExtension1',
full_name='google.protobuf.internal.TestMessageSetExtension1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='i', full_name='google.protobuf.internal.TestMessageSetExtension1.i', index=0,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension1.message_set_extension', index=0,
number=98418603, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=281,
)
_TESTMESSAGESETEXTENSION2 = _descriptor.Descriptor(
name='TestMessageSetExtension2',
full_name='google.protobuf.internal.TestMessageSetExtension2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='str', full_name='google.protobuf.internal.TestMessageSetExtension2.str', index=0,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='message_set_extension', full_name='google.protobuf.internal.TestMessageSetExtension2.message_set_extension', index=0,
number=98418634, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=284,
serialized_end=451,
)
_TESTMESSAGESETEXTENSION3 = _descriptor.Descriptor(
name='TestMessageSetExtension3',
full_name='google.protobuf.internal.TestMessageSetExtension3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='google.protobuf.internal.TestMessageSetExtension3.text', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=493,
)
DESCRIPTOR.message_types_by_name['TestMessageSet'] = _TESTMESSAGESET
DESCRIPTOR.message_types_by_name['TestMessageSetExtension1'] = _TESTMESSAGESETEXTENSION1
DESCRIPTOR.message_types_by_name['TestMessageSetExtension2'] = _TESTMESSAGESETEXTENSION2
DESCRIPTOR.message_types_by_name['TestMessageSetExtension3'] = _TESTMESSAGESETEXTENSION3
DESCRIPTOR.extensions_by_name['message_set_extension3'] = message_set_extension3
TestMessageSet = _reflection.GeneratedProtocolMessageType('TestMessageSet', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESET,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSet)
))
_sym_db.RegisterMessage(TestMessageSet)
TestMessageSetExtension1 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension1', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION1,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension1)
))
_sym_db.RegisterMessage(TestMessageSetExtension1)
TestMessageSetExtension2 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension2', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION2,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension2)
))
_sym_db.RegisterMessage(TestMessageSetExtension2)
TestMessageSetExtension3 = _reflection.GeneratedProtocolMessageType('TestMessageSetExtension3', (_message.Message,), dict(
DESCRIPTOR = _TESTMESSAGESETEXTENSION3,
__module__ = 'google.protobuf.internal.message_set_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestMessageSetExtension3)
))
_sym_db.RegisterMessage(TestMessageSetExtension3)
message_set_extension3.message_type = _TESTMESSAGESETEXTENSION3
TestMessageSet.RegisterExtension(message_set_extension3)
_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION1
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'])
_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'].message_type = _TESTMESSAGESETEXTENSION2
TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'])
_TESTMESSAGESET.has_options = True
_TESTMESSAGESET._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\010\001'))
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
nanolearning/edx-platform | lms/envs/cms/microsite_test.py | 51 | 1387 | """
This is a localdev test for the Microsite processing pipeline
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
from ..dev import ENV_ROOT, FEATURES
MICROSITE_CONFIGURATION = {
"openedx": {
"domain_prefix": "openedx",
"university": "openedx",
"platform_name": "Open edX",
"logo_image_url": "openedx/images/header-logo.png",
"email_from_address": "[email protected]",
"payment_support_email": "[email protected]",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "openedx.localhost",
"course_org_filter": "CDX",
"course_about_show_social_links": False,
"css_overrides_file": "openedx/css/openedx.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "Explore free courses from leading universities.",
"course_index_overlay_logo_file": "openedx/images/header-logo.png",
"homepage_overlay_html": "<h1>Take an Open edX Course</h1>"
}
}
MICROSITE_ROOT_DIR = ENV_ROOT / 'edx-microsite'
# pretend we are behind some marketing site, we want to be able to assert that the Microsite config values override
# this global setting
FEATURES['ENABLE_MKTG_SITE'] = True
FEATURES['USE_MICROSITES'] = True
| agpl-3.0 |
marcelocure/django | tests/test_runner/test_discover_runner.py | 210 | 6196 | import os
from contextlib import contextmanager
from unittest import TestSuite, TextTestRunner, defaultTestLoader
from django.test import TestCase
from django.test.runner import DiscoverRunner
@contextmanager
def change_cwd(directory):
current_dir = os.path.abspath(os.path.dirname(__file__))
new_dir = os.path.join(current_dir, directory)
old_cwd = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_cwd)
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 4)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
with change_cwd(".."):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 5)
def test_empty_label(self):
"""
If the test label is empty, discovery should happen on the current
working directory.
"""
with change_cwd("."):
suite = DiscoverRunner().build_suite([])
self.assertEqual(
suite._tests[0].id().split(".")[0],
os.path.basename(os.getcwd()),
)
def test_empty_test_case(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.EmptyTestCase"],
).countTestCases()
self.assertEqual(count, 0)
def test_discovery_on_package(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests"],
).countTestCases()
self.assertEqual(count, 1)
def test_ignore_adjacent(self):
"""
When given a dotted path to a module, unittest discovery searches
not just the module, but also the directory containing the module.
This results in tests from adjacent modules being run when they
should not. The discover runner avoids this behavior.
"""
count = DiscoverRunner().build_suite(
["test_discovery_sample.empty"],
).countTestCases()
self.assertEqual(count, 0)
def test_testcase_ordering(self):
with change_cwd(".."):
suite = DiscoverRunner().build_suite(["test_discovery_sample/"])
self.assertEqual(
suite._tests[0].__class__.__name__,
'TestDjangoTestCase',
msg="TestDjangoTestCase should be the first test case")
self.assertEqual(
suite._tests[1].__class__.__name__,
'TestZimpleTestCase',
msg="TestZimpleTestCase should be the second test case")
# All others can follow in unspecified order, including doctests
self.assertIn('DocTestCase', [t.__class__.__name__ for t in suite._tests[2:]])
def test_duplicates_ignored(self):
"""
Tests shouldn't be discovered twice when discovering on overlapping paths.
"""
base_app = 'gis_tests'
sub_app = 'gis_tests.geo3d'
with self.modify_settings(INSTALLED_APPS={'append': sub_app}):
single = DiscoverRunner().build_suite([base_app]).countTestCases()
dups = DiscoverRunner().build_suite([base_app, sub_app]).countTestCases()
self.assertEqual(single, dups)
def test_reverse(self):
"""
Reverse should reorder tests while maintaining the grouping specified
by ``DiscoverRunner.reorder_by``.
"""
runner = DiscoverRunner(reverse=True)
suite = runner.build_suite(
test_labels=('test_discovery_sample', 'test_discovery_sample2'))
self.assertIn('test_discovery_sample2', next(iter(suite)).id(),
msg="Test labels should be reversed.")
suite = runner.build_suite(test_labels=('test_discovery_sample2',))
suite = tuple(suite)
self.assertIn('DjangoCase', suite[0].id(),
msg="Test groups should not be reversed.")
self.assertIn('SimpleCase', suite[4].id(),
msg="Test groups order should be preserved.")
self.assertIn('DjangoCase2', suite[0].id(),
msg="Django test cases should be reversed.")
self.assertIn('SimpleCase2', suite[4].id(),
msg="Simple test cases should be reversed.")
self.assertIn('UnittestCase2', suite[8].id(),
msg="Unittest test cases should be reversed.")
self.assertIn('test_2', suite[0].id(),
msg="Methods of Django cases should be reversed.")
self.assertIn('test_2', suite[4].id(),
msg="Methods of simple cases should be reversed.")
self.assertIn('test_2', suite[8].id(),
msg="Methods of unittest cases should be reversed.")
def test_overrideable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overrideable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overrideable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
| bsd-3-clause |
sdgdsffdsfff/task-worker | src/python/Sailing/libs/selenium/webdriver/support/event_firing_webdriver.py | 2 | 12560 | #!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
if isinstance(result, WebElement):
return EventFiringWebElement(result, ef_driver)
elif isinstance(result, list):
return [_wrap_elements(item, ef_driver) for item in result]
else:
return result
class EventFiringWebDriver(object):
"""
A wrapper around an arbitrary WebDriver instance which supports firing events
"""
def __init__(self, driver, event_listener):
"""
Creates a new instance of the EventFiringWebDriver
:Args:
- driver : A WebDriver instance
- event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially
Example:
.. code-block:: python
from selenium.webdriver import Firefox
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
class MyListener(AbstractEventListener):
def before_navigate_to(self, url, driver):
print("Before navigate to %s" % url)
def after_navigate_to(self, url, driver):
print("After navigate to %s" % url)
driver = Firefox()
ef_driver = EventFiringWebDriver(driver, MyListener())
ef_driver.get("http://www.google.co.in/")
"""
if not isinstance(driver, WebDriver):
raise WebDriverException("A WebDriver instance must be supplied")
if not isinstance(event_listener, AbstractEventListener):
raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
self._driver = driver
self._listener = event_listener
@property
def wrapped_driver(self):
"""Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
return self._driver
def get(self, url):
self._dispatch("navigate_to", (url, self._driver), "get", (url, ))
def back(self):
self._dispatch("navigate_back", (self._driver,), "back", ())
def forward(self):
self._dispatch("navigate_forward", (self._driver,), "forward", ())
def execute_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)
def execute_async_script(self, script, *args):
unwrapped_args = (script,) + self._unwrap_element_args(args)
return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)
def close(self):
self._dispatch("close", (self._driver,), "close", ())
def quit(self):
self._dispatch("quit", (self._driver,), "quit", ())
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._driver, d_call)(*d_args)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self)
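# For example, get(url) above reaches this method as
# _dispatch("navigate_to", (url, self._driver), "get", (url,)), which
# calls listener.before_navigate_to(url, driver), then driver.get(url),
# then listener.after_navigate_to(url, driver).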
def _unwrap_element_args(self, args):
if isinstance(args, EventFiringWebElement):
return args.wrapped_element
elif isinstance(args, tuple):
return tuple([self._unwrap_element_args(item) for item in args])
elif isinstance(args, list):
return [self._unwrap_element_args(item) for item in args]
else:
return args
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._driver, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._driver, item, value)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args):
try:
result = attrib(*args)
return _wrap_elements(result, self)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
if hasattr(self._driver, name):
try:
attrib = getattr(self._driver, name)
if not callable(attrib):
return attrib
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
return _wrap
raise AttributeError(name)
class EventFiringWebElement(object):
""""
A wrapper around WebElement instance which supports firing events
"""
def __init__(self, webelement, ef_driver):
"""
Creates a new instance of the EventFiringWebElement
"""
self._webelement = webelement
self._ef_driver = ef_driver
self._driver = ef_driver.wrapped_driver
self._listener = ef_driver._listener
@property
def wrapped_element(self):
"""Returns the WebElement wrapped by this EventFiringWebElement instance"""
return self._webelement
def click(self):
self._dispatch("click", (self._webelement, self._driver), "click", ())
def clear(self):
self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())
def send_keys(self, *value):
self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._webelement, d_call)(*d_args)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self._ef_driver)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._webelement, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._webelement, item, value)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args):
try:
result = attrib(*args)
return _wrap_elements(result, self._ef_driver)
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
if hasattr(self._webelement, name):
try:
attrib = getattr(self._webelement, name)
if not callable(attrib):
return attrib
except Exception, e:
self._listener.on_exception(e, self._driver)
raise e
return _wrap
raise AttributeError(name)
| bsd-2-clause |
QLGu/django-oscar | src/oscar/apps/basket/forms.py | 26 | 10178 | from django import forms
from django.conf import settings
from django.forms.models import modelformset_factory, BaseModelFormSet
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_model
from oscar.forms import widgets
Line = get_model('basket', 'line')
Basket = get_model('basket', 'basket')
Product = get_model('catalogue', 'product')
class BasketLineForm(forms.ModelForm):
save_for_later = forms.BooleanField(
initial=False, required=False, label=_('Save for Later'))
def __init__(self, strategy, *args, **kwargs):
super(BasketLineForm, self).__init__(*args, **kwargs)
self.instance.strategy = strategy
def clean_quantity(self):
qty = self.cleaned_data['quantity']
if qty > 0:
self.check_max_allowed_quantity(qty)
self.check_permission(qty)
return qty
def check_max_allowed_quantity(self, qty):
is_allowed, reason = self.instance.basket.is_quantity_allowed(qty)
if not is_allowed:
raise forms.ValidationError(reason)
def check_permission(self, qty):
policy = self.instance.purchase_info.availability
is_available, reason = policy.is_purchase_permitted(
quantity=qty)
if not is_available:
raise forms.ValidationError(reason)
class Meta:
model = Line
fields = ['quantity']
class BaseBasketLineFormSet(BaseModelFormSet):
def __init__(self, strategy, *args, **kwargs):
self.strategy = strategy
super(BaseBasketLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseBasketLineFormSet, self)._construct_form(
i, strategy=self.strategy, **kwargs)
def _should_delete_form(self, form):
"""
Quantity of zero is treated as if the user checked the DELETE checkbox,
which results in the basket line being deleted
"""
if super(BaseBasketLineFormSet, self)._should_delete_form(form):
return True
if self.can_delete and 'quantity' in form.cleaned_data:
return form.cleaned_data['quantity'] == 0
BasketLineFormSet = modelformset_factory(
Line, form=BasketLineForm, formset=BaseBasketLineFormSet, extra=0,
can_delete=True)
class SavedLineForm(forms.ModelForm):
move_to_basket = forms.BooleanField(initial=False, required=False,
label=_('Move to Basket'))
class Meta:
model = Line
fields = ('id', 'move_to_basket')
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(SavedLineForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(SavedLineForm, self).clean()
if not cleaned_data['move_to_basket']:
# skip further validation (see issue #666)
return cleaned_data
# Get total quantity of all lines with this product (there's normally
# only one but there can be more if you allow product options).
lines = self.basket.lines.filter(product=self.instance.product)
current_qty = lines.aggregate(Sum('quantity'))['quantity__sum'] or 0
desired_qty = current_qty + self.instance.quantity
result = self.strategy.fetch_for_product(self.instance.product)
is_available, reason = result.availability.is_purchase_permitted(
quantity=desired_qty)
if not is_available:
raise forms.ValidationError(reason)
return cleaned_data
class BaseSavedLineFormSet(BaseModelFormSet):
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(BaseSavedLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseSavedLineFormSet, self)._construct_form(
i, strategy=self.strategy, basket=self.basket, **kwargs)
SavedLineFormSet = modelformset_factory(Line, form=SavedLineForm,
formset=BaseSavedLineFormSet, extra=0,
can_delete=True)
class BasketVoucherForm(forms.Form):
code = forms.CharField(max_length=128, label=_('Code'))
def __init__(self, *args, **kwargs):
super(BasketVoucherForm, self).__init__(*args, **kwargs)
def clean_code(self):
return self.cleaned_data['code'].strip().upper()
class AddToBasketForm(forms.Form):
quantity = forms.IntegerField(initial=1, min_value=1, label=_('Quantity'))
def __init__(self, basket, product, *args, **kwargs):
# Note, the product passed in here isn't necessarily the product being
# added to the basket. For child products, it is the *parent* product
# that gets passed to the form. An optional product_id param is passed
# to indicate the ID of the child product being added to the basket.
self.basket = basket
self.parent_product = product
super(AddToBasketForm, self).__init__(*args, **kwargs)
# Dynamically build fields
if product.is_parent:
self._create_parent_product_fields(product)
self._create_product_fields(product)
# Dynamic form building methods
def _create_parent_product_fields(self, product):
"""
Adds the fields for a "group"-type product (eg, a parent product with a
list of children.
Currently requires that a stock record exists for the children
"""
choices = []
disabled_values = []
for child in product.children.all():
# Build a description of the child, including any pertinent
# attributes
attr_summary = child.attribute_summary
if attr_summary:
summary = attr_summary
else:
summary = child.get_title()
# Check if it is available to buy
info = self.basket.strategy.fetch_for_product(child)
if not info.availability.is_available_to_buy:
disabled_values.append(child.id)
choices.append((child.id, summary))
self.fields['child_id'] = forms.ChoiceField(
choices=tuple(choices), label=_("Variant"),
widget=widgets.AdvancedSelect(disabled_values=disabled_values))
def _create_product_fields(self, product):
"""
Add the product option fields.
"""
for option in product.options:
self._add_option_field(product, option)
def _add_option_field(self, product, option):
"""
Creates the appropriate form field for the product option.
This is designed to be overridden so that specific widgets can be used
for certain types of options.
"""
kwargs = {'required': option.is_required}
self.fields[option.code] = forms.CharField(**kwargs)
# Cleaning
def clean_child_id(self):
try:
child = self.parent_product.children.get(
id=self.cleaned_data['child_id'])
except Product.DoesNotExist:
raise forms.ValidationError(
_("Please select a valid product"))
# To avoid duplicate SQL queries, we cache a copy of the loaded child
# product as we're going to need it later.
self.child_product = child
return self.cleaned_data['child_id']
def clean_quantity(self):
# Check that the proposed new line quantity is sensible
qty = self.cleaned_data['quantity']
basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
if basket_threshold:
total_basket_quantity = self.basket.num_items
max_allowed = basket_threshold - total_basket_quantity
if qty > max_allowed:
raise forms.ValidationError(
_("Due to technical limitations we are not able to ship"
" more than %(threshold)d items in one order. Your"
" basket currently has %(basket)d items.")
% {'threshold': basket_threshold,
'basket': total_basket_quantity})
return qty
@property
def product(self):
"""
The actual product being added to the basket
"""
# Note, the child product attribute is saved in the clean_child_id
# method
return getattr(self, 'child_product', self.parent_product)
def clean(self):
info = self.basket.strategy.fetch_for_product(self.product)
# Check currencies are sensible
if (self.basket.currency and
info.price.currency != self.basket.currency):
raise forms.ValidationError(
_("This product cannot be added to the basket as its currency "
"isn't the same as other products in your basket"))
# Check user has permission to add the desired quantity to their
# basket.
current_qty = self.basket.product_quantity(self.product)
desired_qty = current_qty + self.cleaned_data.get('quantity', 1)
is_permitted, reason = info.availability.is_purchase_permitted(
desired_qty)
if not is_permitted:
raise forms.ValidationError(reason)
return self.cleaned_data
# Helpers
def cleaned_options(self):
"""
Return submitted options in a clean format
"""
options = []
for option in self.parent_product.options:
if option.code in self.cleaned_data:
options.append({
'option': option,
'value': self.cleaned_data[option.code]})
return options
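# Illustrative usage sketch (not part of the original module; the field
# values and variable names below are made up for the example, and
# basket.add_product(product, quantity, options) is assumed - check the
# basket model's actual signature):
#
#     form = AddToBasketForm(basket, parent_product,
#                            data={'child_id': 42, 'quantity': 1})
#     if form.is_valid():
#         basket.add_product(form.product,
#                            form.cleaned_data['quantity'],
#                            form.cleaned_options())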
class SimpleAddToBasketForm(AddToBasketForm):
"""
Simplified version of the add to basket form where the quantity is
defaulted to 1 and rendered in a hidden widget
"""
quantity = forms.IntegerField(
initial=1, min_value=1, widget=forms.HiddenInput, label=_('Quantity'))
| bsd-3-clause |
LeoZ123/Machine-Learning-Practice | Support_Vector_Machine(SVM)/SVM_Example.py | 1 | 1872 | '''
Created on Mar 16, 2017
@author: Leo Zhong
'''
print(__doc__)
import numpy as np  # numerical arrays and linear algebra
import pylab as pl  # plotting (matplotlib's pylab interface)
from sklearn import svm
# create 40 linearly separable points, 20 per class
np.random.seed(0)  # fix the seed so the example is reproducible
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
print ("w: ", w)
print ("a: ", a)
# print (" xx: ", xx)
# print (" yy: ", yy)
print ("support_vectors_: ", clf.support_vectors_)
print ("clf.coef_: ", clf.coef_)
# In scikit-learn the coef_ attribute holds the vectors of the separating hyperplanes for linear models.
# It has shape (n_classes, n_features) if n_classes > 1 (multi-class one-vs-all) and (1, n_features) for binary classification.
#
# In this toy binary classification example n_features == 2,
# hence w = coef_[0] is the vector orthogonal to the hyperplane (the hyperplane is fully defined by it + the intercept).
#
# To plot this hyperplane in the 2D case (any hyperplane of a 2D plane is a 1D line), find f such that y = f(x) = a*x + b.
# Here a is the slope of the line and can be computed as a = -w[0] / w[1].
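# As a quick numeric sanity check (an addition to the original example): for
# a hard-margin linear SVM, the distance between the two dashed margin lines
# yy_down and yy_up equals 2 / ||w||.
margin = 2 / np.linalg.norm(clf.coef_[0])
print ("margin width: ", margin)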
# plot the line, the points, and the nearest vectors to the plane
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
| mit |
yanheven/nova | nova/tests/unit/virt/vmwareapi/test_imagecache.py | 43 | 12269 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_vmware.objects import datastore as ds_obj
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
CONF = cfg.CONF
class ImageCacheManagerTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self._session = mock.Mock(name='session')
self._imagecache = imagecache.ImageCacheManager(self._session,
'fake-base-folder')
self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
self._file_name = 'ts-2012-11-22-12-00-00'
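        # self._file_name above is self._time rendered with
        # imagecache.TIMESTAMP_PREFIX ('ts-') and imagecache.TIMESTAMP_FORMAT
        # (presumably '%Y-%m-%d-%H-%M-%S'): the marker-folder name the cache
        # uses to decide when an unused image can be aged out.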
fake.reset()
def tearDown(self):
super(ImageCacheManagerTestCase, self).tearDown()
fake.reset()
def test_timestamp_cleanup(self):
def fake_get_timestamp(ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
self.assertEqual('[fake-ds] fake-path', str(ds_path))
if not self.exists:
return
ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
timeutils.strtime(at=self._time,
fmt=imagecache.TIMESTAMP_FORMAT))
return ts
with contextlib.nested(
mock.patch.object(self._imagecache, '_get_timestamp',
fake_get_timestamp),
mock.patch.object(ds_util, 'file_delete')
) as (_get_timestamp, _file_delete):
self.exists = False
self._imagecache.timestamp_cleanup(
'fake-dc-ref', 'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(0, _file_delete.call_count)
self.exists = True
self._imagecache.timestamp_cleanup(
'fake-dc-ref', 'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
expected_ds_path = ds_obj.DatastorePath(
'fake-ds', 'fake-path', self._file_name)
_file_delete.assert_called_once_with(self._session,
expected_ds_path, 'fake-dc-ref')
def test_get_timestamp(self):
def fake_get_sub_folders(session, ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
self.assertEqual('[fake-ds] fake-path', str(ds_path))
if self.exists:
files = set()
files.add(self._file_name)
return files
with contextlib.nested(
mock.patch.object(ds_util, 'get_sub_folders',
fake_get_sub_folders)
):
self.exists = True
ts = self._imagecache._get_timestamp(
'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(self._file_name, ts)
self.exists = False
ts = self._imagecache._get_timestamp(
'fake-ds-browser',
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertIsNone(ts)
def test_get_timestamp_filename(self):
timeutils.set_time_override(override_time=self._time)
fn = self._imagecache._get_timestamp_filename()
self.assertEqual(self._file_name, fn)
def test_get_datetime_from_filename(self):
t = self._imagecache._get_datetime_from_filename(self._file_name)
self.assertEqual(self._time, t)
def test_get_ds_browser(self):
cache = self._imagecache._ds_browser
ds_browser = mock.Mock()
moref = fake.ManagedObjectReference('datastore-100')
self.assertIsNone(cache.get(moref.value))
mock_get_method = mock.Mock(return_value=ds_browser)
with mock.patch.object(vim_util, 'get_dynamic_property',
mock_get_method):
ret = self._imagecache._get_ds_browser(moref)
mock_get_method.assert_called_once_with(mock.ANY, moref,
'Datastore', 'browser')
self.assertIs(ds_browser, ret)
self.assertIs(ds_browser, cache.get(moref.value))
def test_list_base_images(self):
def fake_get_dynamic_property(vim, mobj, type, property_name):
return 'fake-ds-browser'
def fake_get_sub_folders(session, ds_browser, ds_path):
files = set()
files.add('image-ref-uuid')
return files
with contextlib.nested(
mock.patch.object(vim_util, 'get_dynamic_property',
fake_get_dynamic_property),
mock.patch.object(ds_util, 'get_sub_folders',
fake_get_sub_folders)
) as (_get_dynamic, _get_sub_folders):
fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
datastore = ds_obj.Datastore(name='ds', ref=fake_ds_ref)
ds_path = datastore.build_path('base_folder')
images = self._imagecache._list_datastore_images(
ds_path, datastore)
originals = set()
originals.add('image-ref-uuid')
self.assertEqual({'originals': originals,
'unexplained_images': []},
images)
@mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
@mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
@mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
def test_enlist_image(self,
mock_get_ds_browser,
mock_timestamp_cleanup,
mock_timestamp_folder_get):
image_id = "fake_image_id"
dc_ref = "fake_dc_ref"
fake_ds_ref = mock.Mock()
ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=1,
freespace=1)
ds_browser = mock.Mock()
mock_get_ds_browser.return_value = ds_browser
timestamp_folder_path = mock.Mock()
mock_timestamp_folder_get.return_value = timestamp_folder_path
self._imagecache.enlist_image(image_id, ds, dc_ref)
cache_root_folder = ds.build_path("fake-base-folder")
mock_get_ds_browser.assert_called_once_with(
ds.ref)
mock_timestamp_folder_get.assert_called_once_with(
cache_root_folder, "fake_image_id")
mock_timestamp_cleanup.assert_called_once_with(
dc_ref, ds_browser, timestamp_folder_path)
def test_age_cached_images(self):
def fake_get_ds_browser(ds_ref):
return 'fake-ds-browser'
def fake_get_timestamp(ds_browser, ds_path):
self._get_timestamp_called += 1
path = str(ds_path)
if path == '[fake-ds] fake-path/fake-image-1':
# No time stamp exists
return
if path == '[fake-ds] fake-path/fake-image-2':
# Timestamp that will be valid => no deletion
return 'ts-2012-11-22-10-00-00'
if path == '[fake-ds] fake-path/fake-image-3':
# Timestamp that will be invalid => deletion
return 'ts-2012-11-20-12-00-00'
self.fail()
def fake_mkdir(session, ts_path, dc_ref):
self.assertEqual(
'[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
str(ts_path))
def fake_file_delete(session, ds_path, dc_ref):
self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
with contextlib.nested(
mock.patch.object(self._imagecache, '_get_ds_browser',
fake_get_ds_browser),
mock.patch.object(self._imagecache, '_get_timestamp',
fake_get_timestamp),
mock.patch.object(ds_util, 'mkdir',
fake_mkdir),
mock.patch.object(ds_util, 'file_delete',
fake_file_delete),
mock.patch.object(self._imagecache, 'timestamp_cleanup',
fake_timestamp_cleanup),
) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
_timestamp_cleanup):
timeutils.set_time_override(override_time=self._time)
datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref')
dc_info = vmops.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
self._get_timestamp_called = 0
self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
'fake-image-3', 'fake-image-4'])
self._imagecache.used_images = set(['fake-image-4'])
self._imagecache._age_cached_images(
'fake-context', datastore, dc_info,
ds_obj.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(3, self._get_timestamp_called)
@mock.patch.object(objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
def test_update(self, mock_get_by_inst):
def fake_list_datastore_images(ds_path, datastore):
return {'unexplained_images': [],
'originals': self.images}
def fake_age_cached_images(context, datastore,
dc_info, ds_path):
self.assertEqual('[ds] fake-base-folder', str(ds_path))
self.assertEqual(self.images,
self._imagecache.used_images)
self.assertEqual(self.images,
self._imagecache.originals)
with contextlib.nested(
mock.patch.object(self._imagecache, '_list_datastore_images',
fake_list_datastore_images),
mock.patch.object(self._imagecache,
'_age_cached_images',
fake_age_cached_images)
) as (_list_base, _age_and_verify):
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'host': CONF.host,
'name': 'inst-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = [fake_instance.fake_instance_obj(None, **instance)
for instance in instances]
self.images = set(['1', '2'])
datastore = ds_obj.Datastore(name='ds', ref='fake-ds-ref')
dc_info = vmops.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
datastores_info = [(datastore, dc_info)]
self._imagecache.update('context', all_instances, datastores_info)
| apache-2.0 |
kalxas/QGIS | tests/src/python/test_qgssymbollayer_createsld.py | 30 | 61494 | """
***************************************************************************
test_qgssymbollayer_createsld.py
---------------------
Date : July 2016
Copyright : (C) 2016 by Andrea Aime
Email : andrea dot aime at geosolutions dot it
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *less
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Andrea Aime'
__date__ = 'July 2016'
__copyright__ = '(C) 2012, Andrea Aime'
import qgis # NOQA
from qgis.PyQt.QtCore import Qt, QDir, QFile, QIODevice, QPointF, QSizeF
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QColor, QFont
from qgis.core import (
QgsSimpleMarkerSymbolLayer, QgsSimpleMarkerSymbolLayerBase, QgsUnitTypes, QgsSvgMarkerSymbolLayer,
QgsFontMarkerSymbolLayer, QgsEllipseSymbolLayer, QgsSimpleLineSymbolLayer,
QgsMarkerLineSymbolLayer, QgsMarkerSymbol, QgsSimpleFillSymbolLayer, QgsSVGFillSymbolLayer,
QgsLinePatternFillSymbolLayer, QgsPointPatternFillSymbolLayer, QgsVectorLayer, QgsVectorLayerSimpleLabeling,
QgsTextBufferSettings, QgsPalLayerSettings, QgsTextBackgroundSettings, QgsRuleBasedLabeling)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
class TestQgsSymbolLayerCreateSld(unittest.TestCase):
"""
    This class tests the creation of SLD from QGIS layers
"""
def testSimpleMarkerRotation(self):
symbol = QgsSimpleMarkerSymbolLayer(
QgsSimpleMarkerSymbolLayerBase.Star, color=QColor(255, 0, 0), strokeColor=QColor(0, 255, 0), size=10)
symbol.setAngle(50)
dom, root = self.symbolToSld(symbol)
# print( "Simple marker rotation: " + root.ownerDocument().toString())
self.assertStaticRotation(root, '50')
def testSimpleMarkerUnitDefault(self):
symbol = QgsSimpleMarkerSymbolLayer(
QgsSimpleMarkerSymbolLayerBase.Star, color=QColor(255, 0, 0), strokeColor=QColor(0, 255, 0), size=10)
symbol.setStrokeWidth(3)
symbol.setOffset(QPointF(5, 10))
dom, root = self.symbolToSld(symbol)
# print("Simple marker unit mm: " + root.ownerDocument().toString())
# Check the size has been rescaled to pixels
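        # (10 mm at the SE-standard rendering pixel of 0.28 mm comes to
        # 10 / 0.28 ~= 35.7, rounded to 36; the same conversion applies to the
        # stroke width and offsets below - the 0.28 mm figure is our reading
        # of the spec, the test itself only pins the final values)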
self.assertStaticSize(root, '36')
# Check the same happened to the stroke width
self.assertStrokeWidth(root, 2, 11)
self.assertStaticDisplacement(root, 18, 36)
def testSimpleMarkerUnitPixels(self):
symbol = QgsSimpleMarkerSymbolLayer(
QgsSimpleMarkerSymbolLayerBase.Star, color=QColor(255, 0, 0), strokeColor=QColor(0, 255, 0), size=10)
symbol.setStrokeWidth(3)
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print("Marker unit mm: " + root.ownerDocument().toString())
# Check the size has not been rescaled
self.assertStaticSize(root, '10')
# Check the same happened to the stroke width
self.assertStrokeWidth(root, 2, 3)
self.assertStaticDisplacement(root, 5, 10)
def testSvgMarkerUnitDefault(self):
symbol = QgsSvgMarkerSymbolLayer('symbols/star.svg', 10, 90)
symbol.setFillColor(QColor("blue"))
symbol.setStrokeWidth(1)
symbol.setStrokeColor(QColor('red'))
symbol.setPath('symbols/star.svg')
symbol.setOffset(QPointF(5, 10))
dom, root = self.symbolToSld(symbol)
# print("Svg marker mm: " + dom.toString())
self.assertExternalGraphic(root, 0,
'symbols/star.svg?fill=%230000ff&fill-opacity=1&outline=%23ff0000&outline-opacity=1&outline-width=4',
'image/svg+xml')
self.assertExternalGraphic(root, 1,
'symbols/star.svg', 'image/svg+xml')
self.assertWellKnownMark(root, 0, 'square', '#0000ff', '#ff0000', 4)
# Check the size has been rescaled
self.assertStaticSize(root, '36')
# Check rotation for good measure
self.assertStaticRotation(root, '90')
self.assertStaticDisplacement(root, 18, 36)
def testSvgMarkerUnitPixels(self):
symbol = QgsSvgMarkerSymbolLayer('symbols/star.svg', 10, 0)
symbol.setFillColor(QColor("blue"))
symbol.setStrokeWidth(1)
symbol.setStrokeColor(QColor('red'))
symbol.setPath('symbols/star.svg')
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print("Svg marker unit px: " + dom.toString())
self.assertExternalGraphic(root, 0,
'symbols/star.svg?fill=%230000ff&fill-opacity=1&outline=%23ff0000&outline-opacity=1&outline-width=1',
'image/svg+xml')
self.assertExternalGraphic(root, 1,
'symbols/star.svg', 'image/svg+xml')
self.assertWellKnownMark(root, 0, 'square', '#0000ff', '#ff0000', 1)
# Check the size has not been rescaled
self.assertStaticSize(root, '10')
self.assertStaticDisplacement(root, 5, 10)
def testFontMarkerUnitDefault(self):
symbol = QgsFontMarkerSymbolLayer('sans', ',', 10, QColor('black'), 45)
symbol.setOffset(QPointF(5, 10))
dom, root = self.symbolToSld(symbol)
# print("Font marker unit mm: " + dom.toString())
# Check the size has been rescaled
self.assertStaticSize(root, '36')
self.assertStaticRotation(root, '45')
self.assertStaticDisplacement(root, 18, 36)
def testFontMarkerUnitPixel(self):
symbol = QgsFontMarkerSymbolLayer('sans', ',', 10, QColor('black'), 45)
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Font marker unit mm: " + dom.toString())
# Check the size has been rescaled
self.assertStaticSize(root, '10')
self.assertStaticRotation(root, '45')
self.assertStaticDisplacement(root, 5, 10)
def createEllipseSymbolLayer(self):
# No way to build it programmatically...
mTestName = 'QgsEllipseSymbolLayer'
mFilePath = QDir.toNativeSeparators(
'%s/symbol_layer/%s.sld' % (unitTestDataPath(), mTestName))
mDoc = QDomDocument(mTestName)
mFile = QFile(mFilePath)
mFile.open(QIODevice.ReadOnly)
mDoc.setContent(mFile, True)
mFile.close()
mSymbolLayer = QgsEllipseSymbolLayer.createFromSld(
mDoc.elementsByTagName('PointSymbolizer').item(0).toElement())
return mSymbolLayer
def testEllipseMarkerUnitDefault(self):
symbol = self.createEllipseSymbolLayer()
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderMillimeters)
dom, root = self.symbolToSld(symbol)
# print ("Ellipse marker unit mm: " + dom.toString())
# Check the size has been rescaled
self.assertStaticSize(root, '25')
# Check also the stroke width
self.assertStrokeWidth(root, 2, 4)
self.assertStaticDisplacement(root, 18, 36)
def testEllipseMarkerUnitPixel(self):
symbol = self.createEllipseSymbolLayer()
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Ellipse marker unit mm: " + dom.toString())
# Check the size has been rescaled
self.assertStaticSize(root, '7')
# Check also the stroke width
self.assertStrokeWidth(root, 2, 1)
self.assertStaticDisplacement(root, 5, 10)
def testSimpleLineHairline(self):
symbol = QgsSimpleLineSymbolLayer(QColor("black"), 0)
dom, root = self.symbolToSld(symbol)
# print ("Simple line px: \n" + dom.toString())
# Hairline is turned into 0.5px
self.assertStrokeWidth(root, 1, 0.5)
def testSimpleLineUnitDefault(self):
symbol = QgsSimpleLineSymbolLayer(QColor("black"), 1)
symbol.setCustomDashVector([10, 10])
symbol.setUseCustomDashPattern(True)
symbol.setOffset(5)
dom, root = self.symbolToSld(symbol)
# print ("Simple line px: \n" + dom.toString())
self.assertStrokeWidth(root, 1, 4)
self.assertDashPattern(root, 4, '36 36')
self.assertStaticPerpendicularOffset(root, '18')
def testSimpleLineUnitPixel(self):
symbol = QgsSimpleLineSymbolLayer(QColor("black"), 1)
symbol.setCustomDashVector([10, 10])
symbol.setUseCustomDashPattern(True)
symbol.setOffset(5)
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Simple line px: \n" + dom.toString())
self.assertStrokeWidth(root, 1, 1)
self.assertDashPattern(root, 4, '10 10')
self.assertStaticPerpendicularOffset(root, '5')
def testMarkLineUnitDefault(self):
symbol = QgsMarkerLineSymbolLayer()
symbol.setSubSymbol(
QgsMarkerSymbol.createSimple({'color': '#ffffff', 'size': '3'}))
symbol.setInterval(5)
symbol.setOffset(5)
dom, root = self.symbolToSld(symbol)
# print ("Mark line mm: \n" + dom.toString())
# size of the mark
self.assertStaticSize(root, '11')
# gap and offset
self.assertStaticGap(root, '18')
self.assertStaticPerpendicularOffset(root, '18')
def testMarkLineUnitPixels(self):
symbol = QgsMarkerLineSymbolLayer()
symbol.setSubSymbol(
QgsMarkerSymbol.createSimple({'color': '#ffffff', 'size': '3'}))
symbol.setInterval(5)
symbol.setOffset(5)
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Mark line px: \n" + dom.toString())
# size of the mark
self.assertStaticSize(root, '3')
# gap and offset
self.assertStaticGap(root, '5')
self.assertStaticPerpendicularOffset(root, '5')
def testSimpleFillDefault(self):
symbol = QgsSimpleFillSymbolLayer(
QColor('red'), Qt.SolidPattern, QColor('green'), Qt.SolidLine, 5)
symbol.setOffset(QPointF(5, 10))
dom, root = self.symbolToSld(symbol)
# print ("Simple fill mm: \n" + dom.toString())
self.assertStrokeWidth(root, 2, 18)
self.assertStaticDisplacement(root, 18, 36)
def testSimpleFillPixels(self):
symbol = QgsSimpleFillSymbolLayer(
QColor('red'), Qt.SolidPattern, QColor('green'), Qt.SolidLine, 5)
symbol.setOffset(QPointF(5, 10))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ( "Simple fill px: \n" + dom.toString())
self.assertStrokeWidth(root, 2, 5)
self.assertStaticDisplacement(root, 5, 10)
def testSvgFillDefault(self):
symbol = QgsSVGFillSymbolLayer('test/star.svg', 10, 45)
symbol.setSvgFillColor(QColor('blue'))
symbol.setSvgStrokeWidth(3)
symbol.setSvgStrokeColor(QColor('yellow'))
symbol.subSymbol().setWidth(10)
dom, root = self.symbolToSld(symbol)
# print ("Svg fill mm: \n" + dom.toString())
self.assertExternalGraphic(root, 0,
'test/star.svg?fill=%230000ff&fill-opacity=1&outline=%23ffff00&outline-opacity=1&outline-width=11',
'image/svg+xml')
self.assertExternalGraphic(root, 1,
'test/star.svg', 'image/svg+xml')
self.assertWellKnownMark(root, 0, 'square', '#0000ff', '#ffff00', 11)
self.assertStaticRotation(root, '45')
self.assertStaticSize(root, '36')
# width of the polygon stroke
lineSymbolizer = root.elementsByTagName('se:LineSymbolizer').item(0).toElement()
self.assertStrokeWidth(lineSymbolizer, 1, 36)
def testSvgFillPixel(self):
symbol = QgsSVGFillSymbolLayer('test/star.svg', 10, 45)
symbol.setSvgFillColor(QColor('blue'))
symbol.setSvgStrokeWidth(3)
symbol.setSvgStrokeColor(QColor('black'))
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
symbol.subSymbol().setWidth(10)
dom, root = self.symbolToSld(symbol)
# print ("Svg fill px: \n" + dom.toString())
self.assertExternalGraphic(root, 0,
'test/star.svg?fill=%230000ff&fill-opacity=1&outline=%23000000&outline-opacity=1&outline-width=3',
'image/svg+xml')
self.assertExternalGraphic(root, 1,
'test/star.svg', 'image/svg+xml')
self.assertWellKnownMark(root, 0, 'square', '#0000ff', '#000000', 3)
self.assertStaticRotation(root, '45')
self.assertStaticSize(root, '10')
# width of the polygon stroke
lineSymbolizer = root.elementsByTagName('se:LineSymbolizer').item(0).toElement()
self.assertStrokeWidth(lineSymbolizer, 1, 10)
def testLineFillDefault(self):
symbol = QgsLinePatternFillSymbolLayer()
symbol.setLineAngle(45)
symbol.setLineWidth(1)
symbol.setOffset(5)
dom, root = self.symbolToSld(symbol)
# print ("Line fill mm: \n" + dom.toString())
self.assertStaticRotation(root, '45')
self.assertStrokeWidth(root, 1, 4)
self.assertStaticSize(root, '18')
self.assertStaticDisplacement(root, 15, 9)
def testLineFillPixels(self):
symbol = QgsLinePatternFillSymbolLayer()
symbol.setLineAngle(45)
symbol.setLineWidth(1)
symbol.setOffset(5)
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Line fill px: \n" + dom.toString())
self.assertStaticRotation(root, '45')
self.assertStrokeWidth(root, 1, 1)
self.assertStaticSize(root, '5')
self.assertStaticDisplacement(root, 4.25, 2.63)
def testPointFillDefault(self):
symbol = QgsPointPatternFillSymbolLayer()
dom, root = self.symbolToSld(symbol)
# print ("Point fill mm: \n" + dom.toString())
self.assertStaticSize(root, '7')
def testPointFillpixels(self):
symbol = QgsPointPatternFillSymbolLayer()
symbol.setOutputUnit(QgsUnitTypes.RenderPixels)
dom, root = self.symbolToSld(symbol)
# print ("Point fill px: \n" + dom.toString())
self.assertStaticSize(root, '2')
def testSingleSymbolNoScaleDependencies(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "singleSymbol"))
layer.loadNamedStyle(mFilePath)
dom, root = self.layerToSld(layer)
# print("No dep on single symbol:" + dom.toString())
self.assertScaleDenominator(root, None, None)
def testSingleSymbolScaleDependencies(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "singleSymbol"))
layer.loadNamedStyle(mFilePath)
layer.setMaximumScale(1000)
layer.setMinimumScale(500000)
layer.setScaleBasedVisibility(True)
dom, root = self.layerToSld(layer)
# print("Scale dep on single symbol:" + dom.toString())
self.assertScaleDenominator(root, '1000', '500000')
def testCategorizedNoScaleDependencies(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "categorized"))
layer.loadNamedStyle(mFilePath)
dom, root = self.layerToSld(layer)
# print("Categorized no scale deps:" + dom.toString())
ruleCount = root.elementsByTagName('se:Rule').size()
for i in range(0, ruleCount):
self.assertScaleDenominator(root, None, None, i)
def testCategorizedWithScaleDependencies(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "categorized"))
layer.loadNamedStyle(mFilePath)
layer.setMaximumScale(1000)
layer.setMinimumScale(500000)
layer.setScaleBasedVisibility(True)
dom, root = self.layerToSld(layer)
# print("Categorized with scale deps:" + dom.toString())
ruleCount = root.elementsByTagName('se:Rule').size()
for i in range(0, ruleCount):
self.assertScaleDenominator(root, '1000', '500000', i)
def testGraduatedNoScaleDependencies(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "graduated"))
status = layer.loadNamedStyle(mFilePath) # NOQA
dom, root = self.layerToSld(layer)
# print("Graduated no scale deps:" + dom.toString())
ruleCount = root.elementsByTagName('se:Rule').size()
for i in range(0, ruleCount):
self.assertScaleDenominator(root, None, None, i)
# def testRuleBasedNoRootScaleDependencies(self):
# layer = QgsVectorLayer("Polygon", "addfeat", "memory")
#
# mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "ruleBased"))
# status = layer.loadNamedStyle(mFilePath) # NOQA
#
# dom, root = self.layerToSld(layer)
# print(("Rule based, no root scale deps:" + dom.toString()))
#
# ruleCount = root.elementsByTagName('se:Rule').size() # NOQA
# self.assertScaleDenominator(root, '1000', '40000000', 0)
# self.assertScaleDenominator(root, None, None, 1)
    def testRuleBasedWithRootScaleDependencies(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "ruleBased"))
status = layer.loadNamedStyle(mFilePath) # NOQA
layer.setMaximumScale(5000)
layer.setMinimumScale(50000000)
layer.setScaleBasedVisibility(True)
dom, root = self.layerToSld(layer)
# print("Rule based, with root scale deps:" + dom.toString())
ruleCount = root.elementsByTagName('se:Rule').size() # NOQA
self.assertScaleDenominator(root, '5000', '40000000', 0)
self.assertScaleDenominator(root, '5000', '50000000', 1)
def testCategorizedFunctionConflict(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators(
'%s/symbol_layer/%s.qml' % (unitTestDataPath(), "categorizedFunctionConflict"))
status = layer.loadNamedStyle(mFilePath) # NOQA
dom, root = self.layerToSld(layer)
# print("Rule based, with root scale deps:" + dom.toString())
ruleCount = root.elementsByTagName('se:Rule').size() # NOQA
self.assertEqual(7, ruleCount)
self.assertRuleRangeFilter(root, 0, 'Area', '0', True, '500', True)
self.assertRuleRangeFilter(root, 1, 'Area', '500', False, '1000', True)
self.assertRuleRangeFilter(root, 2, 'Area', '1000', False, '5000', True)
self.assertRuleRangeFilter(root, 3, 'Area', '5000', False, '10000', True)
self.assertRuleRangeFilter(root, 4, 'Area', '10000', False, '50000', True)
self.assertRuleRangeFilter(root, 5, 'Area', '50000', False, '100000', True)
self.assertRuleRangeFilter(root, 6, 'Area', '100000', False, '200000', True)
def assertRuleRangeFilter(self, root, index, attributeName, min, includeMin, max, includeMax):
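        # The asserted filter shape, sketched for orientation (text
        # abbreviated; the min/max literals sit in the second child node of
        # each comparison):
        #   <Filter>
        #     <ogc:And>
        #       <ogc:PropertyIsGreaterThan[OrEqualTo]>
        #         <ogc:PropertyName>attributeName</ogc:PropertyName> min
        #       </ogc:PropertyIsGreaterThan[OrEqualTo]>
        #       <ogc:PropertyIsLessThan[OrEqualTo]>
        #         <ogc:PropertyName>attributeName</ogc:PropertyName> max
        #       </ogc:PropertyIsLessThan[OrEqualTo]>
        #     </ogc:And>
        #   </Filter>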
rule = root.elementsByTagName('se:Rule').item(index).toElement()
filter = rule.elementsByTagName("Filter").item(0).firstChild()
self.assertEqual("ogc:And", filter.nodeName())
gt = filter.firstChild()
expectedGtName = "ogc:PropertyIsGreaterThanOrEqualTo" if includeMin else "ogc:PropertyIsGreaterThan"
self.assertEqual(expectedGtName, gt.nodeName())
gtProperty = gt.firstChild()
self.assertEqual("ogc:PropertyName", gtProperty.nodeName())
self.assertEqual(attributeName, gtProperty.toElement().text())
gtValue = gt.childNodes().item(1)
self.assertEqual(min, gtValue.toElement().text())
lt = filter.childNodes().item(1)
expectedLtName = "ogc:PropertyIsLessThanOrEqualTo" if includeMax else "ogc:PropertyIsLessThan"
self.assertEqual(expectedLtName, lt.nodeName())
ltProperty = lt.firstChild()
self.assertEqual("ogc:PropertyName", ltProperty.nodeName())
self.assertEqual(attributeName, ltProperty.toElement().text())
ltValue = lt.childNodes().item(1)
self.assertEqual(max, ltValue.toElement().text())
def testSimpleLabeling(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
# Pick a local default font
fontFamily = QFont().family()
settings = layer.labeling().settings()
format = settings.format()
font = format.font()
font.setFamily(fontFamily)
font.setBold(False)
font.setItalic(False)
format.setFont(font)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Simple label text symbolizer" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
self.assertPropertyName(ts, 'se:Label', 'NAME')
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual(fontFamily, self.assertSvgParameter(font, 'font-family').text())
self.assertEqual('11', self.assertSvgParameter(font, 'font-size').text())
fill = self.assertElement(ts, 'se:Fill', 0)
self.assertEqual('#000000', self.assertSvgParameter(fill, "fill").text())
self.assertIsNone(self.assertSvgParameter(fill, "fill-opacity", True))
def testLabelingUomMillimeter(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
self.updateLayerLabelingUnit(layer, QgsUnitTypes.RenderMillimeters)
dom, root = self.layerToSld(layer)
# print("Label sized in mm " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('32', self.assertSvgParameter(font, 'font-size').text())
def testLabelingUomPixels(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
self.updateLayerLabelingUnit(layer, QgsUnitTypes.RenderPixels)
dom, root = self.layerToSld(layer)
# print("Label sized in pixels " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('9', self.assertSvgParameter(font, 'font-size').text())
def testLabelingUomInches(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
self.updateLayerLabelingUnit(layer, QgsUnitTypes.RenderInches)
dom, root = self.layerToSld(layer)
# print("Label sized in inches " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('816', self.assertSvgParameter(font, 'font-size').text())
def testTextStyle(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
# testing regular
self.updateLayerLabelingFontStyle(layer, False, False)
dom, root = self.layerToSld(layer)
# print("Simple label italic text" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertIsNone(self.assertSvgParameter(font, 'font-weight', True))
self.assertIsNone(self.assertSvgParameter(font, 'font-style', True))
# testing bold
self.updateLayerLabelingFontStyle(layer, True, False)
dom, root = self.layerToSld(layer)
# print("Simple label bold text" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('bold', self.assertSvgParameter(font, 'font-weight').text())
self.assertIsNone(self.assertSvgParameter(font, 'font-style', True))
# testing italic
self.updateLayerLabelingFontStyle(layer, False, True)
dom, root = self.layerToSld(layer)
# print("Simple label italic text" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('italic', self.assertSvgParameter(font, 'font-style').text())
self.assertIsNone(self.assertSvgParameter(font, 'font-weight', True))
# testing bold italic
self.updateLayerLabelingFontStyle(layer, True, True)
dom, root = self.layerToSld(layer)
# print("Simple label bold and italic text" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('italic', self.assertSvgParameter(font, 'font-style').text())
self.assertEqual('bold', self.assertSvgParameter(font, 'font-weight').text())
# testing underline and strikethrough vendor options
self.updateLayerLabelingFontStyle(layer, False, False, True, True)
dom, root = self.layerToSld(layer)
# print("Simple label underline and strikethrough text" + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
font = self.assertElement(ts, 'se:Font', 0)
self.assertEqual('true', self.assertVendorOption(ts, 'underlineText').text())
self.assertEqual('true', self.assertVendorOption(ts, 'strikethroughText').text())
def testTextMixedCase(self):
self.assertCapitalizationFunction(QFont.MixedCase, None)
def testTextUppercase(self):
self.assertCapitalizationFunction(QFont.AllUppercase, "strToUpperCase")
def testTextLowercase(self):
self.assertCapitalizationFunction(QFont.AllLowercase, "strToLowerCase")
def testTextCapitalcase(self):
self.assertCapitalizationFunction(QFont.Capitalize, "strCapitalize")
def assertCapitalizationFunction(self, capitalization, expectedFunction):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
format = settings.format()
font = format.font()
font.setCapitalization(capitalization)
format.setFont(font)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Simple text with capitalization " + str(QFont.AllUppercase) + ": " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
label = self.assertElement(ts, "se:Label", 0)
if expectedFunction is None:
property = self.assertElement(label, "ogc:PropertyName", 0)
self.assertEqual("NAME", property.text())
else:
function = self.assertElement(label, "ogc:Function", 0)
self.assertEqual(expectedFunction, function.attribute("name"))
property = self.assertElement(function, "ogc:PropertyName", 0)
self.assertEqual("NAME", property.text())
def testLabelingTransparency(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
format = settings.format()
format.setOpacity(0.5)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with transparency " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
fill = self.assertElement(ts, 'se:Fill', 0)
self.assertEqual('#000000', self.assertSvgParameter(fill, "fill").text())
self.assertEqual('0.5', self.assertSvgParameter(fill, "fill-opacity").text())
def testLabelingBuffer(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
buffer = QgsTextBufferSettings()
buffer.setEnabled(True)
buffer.setSize(10)
buffer.setSizeUnit(QgsUnitTypes.RenderPixels)
buffer.setColor(QColor("Black"))
self.setLabelBufferSettings(layer, buffer)
dom, root = self.layerToSld(layer)
# print("Label with buffer 10 px " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
halo = self.assertElement(ts, 'se:Halo', 0)
# not full width, just radius here
self.assertEqual('5', self.assertElement(ts, 'se:Radius', 0).text())
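        # (the 10 px QGIS buffer size is a full width, so the SLD halo
        # radius is half of it)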
haloFill = self.assertElement(halo, 'se:Fill', 0)
self.assertEqual('#000000', self.assertSvgParameter(haloFill, "fill").text())
def testLabelingBufferPointTranslucent(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
buffer = QgsTextBufferSettings()
buffer.setEnabled(True)
buffer.setSize(10)
buffer.setSizeUnit(QgsUnitTypes.RenderPoints)
buffer.setColor(QColor("Red"))
buffer.setOpacity(0.5)
self.setLabelBufferSettings(layer, buffer)
dom, root = self.layerToSld(layer)
# print("Label with buffer 10 points, red 50% transparent " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
halo = self.assertElement(ts, 'se:Halo', 0)
# not full width, just radius here
self.assertEqual('6.5', self.assertElement(ts, 'se:Radius', 0).text())
haloFill = self.assertElement(halo, 'se:Fill', 0)
self.assertEqual('#ff0000', self.assertSvgParameter(haloFill, "fill").text())
self.assertEqual('0.5', self.assertSvgParameter(haloFill, "fill-opacity").text())
def testLabelingLowPriority(self):
self.assertLabelingPriority(0, 0, '0')
def testLabelingDefaultPriority(self):
self.assertLabelingPriority(0, 5, None)
def testLabelingHighPriority(self):
self.assertLabelingPriority(0, 10, '1000')
def testLabelingZIndexLowPriority(self):
self.assertLabelingPriority(1, 0, '1001')
def testLabelingZIndexDefaultPriority(self):
self.assertLabelingPriority(1, 5, "1500")
def testLabelingZIndexHighPriority(self):
self.assertLabelingPriority(1, 10, '2000')
def assertLabelingPriority(self, zIndex, priority, expectedSldPriority):
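        # Mapping pinned down by the tests above: within zIndex 0 the 0-10
        # QGIS priority scales onto se:Priority 0..1000 (the default of 5
        # emits no element at all), while zIndex 1 shifts the band to
        # 1001..2000 with the default landing on 1500.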
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
settings.zIndex = zIndex
settings.priority = priority
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with zIndex at " + str(zIndex) + " and priority at " + str(priority) + ": " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
priorityElement = self.assertElement(ts, "se:Priority", 0, True)
if expectedSldPriority is None:
self.assertIsNone(priorityElement)
else:
self.assertEqual(expectedSldPriority, priorityElement.text())
def testLabelingPlacementOverPointOffsetRotation(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
settings.placement = QgsPalLayerSettings.OverPoint
settings.xOffset = 5
settings.yOffset = 10
settings.offsetUnits = QgsUnitTypes.RenderMillimeters
settings.quadOffset = QgsPalLayerSettings.QuadrantOver
settings.angleOffset = 30
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with 'over point' placement " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
pointPlacement = self.assertPointPlacement(ts)
self.assertStaticDisplacement(pointPlacement, 18, 36)
self.assertStaticAnchorPoint(pointPlacement, 0.5, 0.5)
def testPointPlacementAboveLeft(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantAboveLeft, "AboveLeft", 1, 0)
def testPointPlacementAbove(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantAbove, "Above", 0.5, 0)
def testPointPlacementAboveRight(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantAboveRight, "AboveRight", 0, 0)
def testPointPlacementLeft(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantLeft, "Left", 1, 0.5)
def testPointPlacementRight(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantRight, "Right", 0, 0.5)
def testPointPlacementBelowLeft(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantBelowLeft, "BelowLeft", 1, 1)
def testPointPlacementBelow(self):
self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantBelow, "Below", 0.5, 1)
    def testPointPlacementBelowRight(self):
        self.assertLabelQuadrant(QgsPalLayerSettings.QuadrantBelowRight, "BelowRight", 0, 1)
    def testPointPlacementCartographic(self):
        self.assertPointPlacementDistance(QgsPalLayerSettings.OrderedPositionsAroundPoint)
    def testPointPlacementAroundPoint(self):
        self.assertPointPlacementDistance(QgsPalLayerSettings.AroundPoint)
def testLineParallelPlacement(self):
layer = QgsVectorLayer("LineString", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "lineLabel")
dom, root = self.layerToSld(layer)
# print("Label with parallel line placement " + dom.toString())
linePlacement = self.assertLinePlacement(root)
generalize = self.assertElement(linePlacement, 'se:GeneralizeLine', 0)
self.assertEqual("true", generalize.text())
def testLineParallelPlacementOffsetRepeat(self):
layer = QgsVectorLayer("LineString", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "lineLabel")
self.updateLinePlacementProperties(layer, QgsPalLayerSettings.Line, 2, 50)
dom, root = self.layerToSld(layer)
# print("Label with parallel line placement, perp. offset and repeat " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
linePlacement = self.assertLinePlacement(ts)
generalize = self.assertElement(linePlacement, 'se:GeneralizeLine', 0)
self.assertEqual("true", generalize.text())
offset = self.assertElement(linePlacement, 'se:PerpendicularOffset', 0)
self.assertEqual("7", offset.text())
repeat = self.assertElement(linePlacement, 'se:Repeat', 0)
self.assertEqual("true", repeat.text())
gap = self.assertElement(linePlacement, 'se:Gap', 0)
self.assertEqual("179", gap.text())
self.assertEqual("179", self.assertVendorOption(ts, "repeat").text())
def testLineCurvePlacementOffsetRepeat(self):
layer = QgsVectorLayer("LineString", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "lineLabel")
self.updateLinePlacementProperties(layer, QgsPalLayerSettings.Curved, 2, 50, 30, 40)
dom, root = self.layerToSld(layer)
# print("Label with curved line placement " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
linePlacement = self.assertLinePlacement(ts)
generalize = self.assertElement(linePlacement, 'se:GeneralizeLine', 0)
self.assertEqual("true", generalize.text())
offset = self.assertElement(linePlacement, 'se:PerpendicularOffset', 0)
self.assertEqual("7", offset.text())
repeat = self.assertElement(linePlacement, 'se:Repeat', 0)
self.assertEqual("true", repeat.text())
gap = self.assertElement(linePlacement, 'se:Gap', 0)
self.assertEqual("179", gap.text())
self.assertEqual("179", self.assertVendorOption(ts, "repeat").text())
self.assertEqual("true", self.assertVendorOption(ts, "followLine").text())
self.assertEqual("30", self.assertVendorOption(ts, "maxAngleDelta").text())
def testLineCurveMergeLines(self):
layer = QgsVectorLayer("LineString", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "lineLabel")
settings = layer.labeling().settings()
settings.placement = QgsPalLayerSettings.Curved
settings.mergeLines = True
settings.labelPerPart = True
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with curved line and line grouping " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
self.assertEqual("yes", self.assertVendorOption(ts, "group").text())
self.assertEqual("true", self.assertVendorOption(ts, "labelAllGroup").text())
def testLabelingPolygonFree(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
settings = layer.labeling().settings()
settings.placement = QgsPalLayerSettings.Free
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Polygon label with 'Free' placement " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
pointPlacement = self.assertPointPlacement(ts)
self.assertIsNone(self.assertElement(ts, "se:Displacement", 0, True))
self.assertStaticAnchorPoint(pointPlacement, 0.5, 0.5)
def testLabelingPolygonPerimeterCurved(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
self.updateLinePlacementProperties(layer, QgsPalLayerSettings.PerimeterCurved, 2, 50, 30, -40)
dom, root = self.layerToSld(layer)
# print("Polygon Label with curved perimeter line placement " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
linePlacement = self.assertLinePlacement(ts)
generalize = self.assertElement(linePlacement, 'se:GeneralizeLine', 0)
self.assertEqual("true", generalize.text())
offset = self.assertElement(linePlacement, 'se:PerpendicularOffset', 0)
self.assertEqual("7", offset.text())
repeat = self.assertElement(linePlacement, 'se:Repeat', 0)
self.assertEqual("true", repeat.text())
gap = self.assertElement(linePlacement, 'se:Gap', 0)
self.assertEqual("179", gap.text())
self.assertEqual("179", self.assertVendorOption(ts, "repeat").text())
self.assertEqual("true", self.assertVendorOption(ts, "followLine").text())
self.assertEqual("30", self.assertVendorOption(ts, "maxAngleDelta").text())
def testLabelScaleDependencies(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
settings = layer.labeling().settings()
settings.scaleVisibility = True
        # Careful: min scale -> large scale denominator (and vice versa)
settings.minimumScale = 10000000
settings.maximumScale = 1000000
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Labeling with scale dependencies " + dom.toString())
self.assertScaleDenominator(root, "1000000", "10000000", 1)
def testLabelShowAll(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
settings = layer.labeling().settings()
settings.displayAll = True
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Labeling, showing all labels " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
self.assertVendorOption(ts, "conflictResolution", "false")
def testLabelUpsideDown(self):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
settings = layer.labeling().settings()
settings.upsidedownLabels = QgsPalLayerSettings.ShowAll
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Labeling, showing upside down labels on lines " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
self.assertVendorOption(ts, "forceLeftToRight", "false")
def testLabelBackgroundSquareResize(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeSquare, 'square',
QgsTextBackgroundSettings.SizeBuffer, 'proportional')
def testLabelBackgroundRectangleResize(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeRectangle, 'square',
QgsTextBackgroundSettings.SizeBuffer, 'stretch')
def testLabelBackgroundCircleResize(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeCircle, 'circle',
QgsTextBackgroundSettings.SizeBuffer, 'proportional')
def testLabelBackgroundEllipseResize(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeEllipse, 'circle',
QgsTextBackgroundSettings.SizeBuffer, 'stretch')
def testLabelBackgroundSquareAbsolute(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeSquare, 'square',
QgsTextBackgroundSettings.SizeFixed, None)
def testLabelBackgroundRectangleAbsolute(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeRectangle, 'square',
QgsTextBackgroundSettings.SizeFixed, None)
def testLabelBackgroundCircleAbsolute(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeCircle, 'circle',
QgsTextBackgroundSettings.SizeFixed, None)
def testLabelBackgroundEllipseAbsolute(self):
self.assertLabelBackground(QgsTextBackgroundSettings.ShapeEllipse, 'circle',
QgsTextBackgroundSettings.SizeFixed, None)
def assertLabelBackground(self, backgroundType, expectedMarkName, sizeType, expectedResize):
layer = QgsVectorLayer("Polygon", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "polygonLabel")
settings = layer.labeling().settings()
background = QgsTextBackgroundSettings()
background.setEnabled(True)
background.setType(backgroundType)
background.setFillColor(QColor('yellow'))
background.setStrokeColor(QColor('black'))
background.setStrokeWidth(2)
background.setSize(QSizeF(10, 10))
background.setSizeType(sizeType)
format = settings.format()
format.setBackground(background)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Labeling, with background type " + str(backgroundType) + " and size type " + str(sizeType) + ": " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
graphic = self.assertElement(ts, "se:Graphic", 0)
self.assertEqual("36", self.assertElement(graphic, 'se:Size', 0).text())
self.assertWellKnownMark(graphic, 0, expectedMarkName, '#ffff00', '#000000', 7)
if expectedResize is None:
            self.assertIsNone(self.assertVendorOption(ts, 'graphic-resize', True))
else:
self.assertEqual(expectedResize, self.assertVendorOption(ts, 'graphic-resize').text())
        if sizeType == QgsTextBackgroundSettings.SizeBuffer:
# check extra padding for proportional ellipse
if backgroundType == QgsTextBackgroundSettings.ShapeEllipse:
self.assertEqual("42.5 49", self.assertVendorOption(ts, 'graphic-margin').text())
else:
self.assertEqual("36 36", self.assertVendorOption(ts, 'graphic-margin').text())
else:
self.assertIsNone(self.assertVendorOption(ts, 'graphic-margin', True))
def testRuleBasedLabels(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "ruleLabel")
dom, root = self.layerToSld(layer)
# print("Rule based labeling: " + dom.toString())
# three rules, one with the point symbol, one with the first rule based label,
# one with the second rule based label
rule1 = self.getRule(root, 0)
self.assertElement(rule1, 'se:PointSymbolizer', 0)
rule2 = self.getRule(root, 1)
self.assertScaleDenominator(root, '100000', '10000000', 1)
tsRule2 = self.assertElement(rule2, 'se:TextSymbolizer', 0)
gt = rule2.elementsByTagName("Filter").item(0).firstChild()
self.assertEqual("ogc:PropertyIsGreaterThan", gt.nodeName())
gtProperty = gt.toElement().firstChild()
self.assertEqual("ogc:PropertyName", gtProperty.nodeName())
self.assertEqual("POP_MAX", gtProperty.toElement().text())
gtValue = gt.childNodes().item(1)
self.assertEqual("1000000", gtValue.toElement().text())
rule3 = self.getRule(root, 2)
tsRule3 = self.assertElement(rule3, 'se:TextSymbolizer', 0)
lt = rule3.elementsByTagName("Filter").item(0).firstChild()
self.assertEqual("ogc:PropertyIsLessThan", lt.nodeName())
ltProperty = lt.toElement().firstChild()
self.assertEqual("ogc:PropertyName", ltProperty.nodeName())
self.assertEqual("POP_MAX", ltProperty.toElement().text())
        ltValue = lt.childNodes().item(1)
        self.assertEqual("1000000", ltValue.toElement().text())
# check that adding a rule without settings does not segfault
xml1 = dom.toString()
layer.labeling().rootRule().appendChild(QgsRuleBasedLabeling.Rule(None))
dom, root = self.layerToSld(layer)
xml2 = dom.toString()
self.assertEqual(xml1, xml2)
def updateLinePlacementProperties(self, layer, linePlacement, distance, repeat, maxAngleInternal=25,
maxAngleExternal=-25):
settings = layer.labeling().settings()
settings.placement = linePlacement
settings.dist = distance
settings.repeatDistance = repeat
settings.maxCurvedCharAngleIn = maxAngleInternal
settings.maxCurvedCharAngleOut = maxAngleExternal
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
def assertPointPlacementDistance(self, placement):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
settings.placement = placement
settings.xOffset = 0
settings.yOffset = 0
settings.dist = 2
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with around point placement " + dom.toString())
ts = self.getTextSymbolizer(root, 1, 0)
pointPlacement = self.assertPointPlacement(ts)
self.assertStaticAnchorPoint(pointPlacement, 0, 0.5)
self.assertStaticDisplacement(pointPlacement, 4.95, 4.95)
def assertLabelQuadrant(self, quadrant, label, ax, ay):
layer = QgsVectorLayer("Point", "addfeat", "memory")
self.loadStyleWithCustomProperties(layer, "simpleLabel")
settings = layer.labeling().settings()
settings.placement = QgsPalLayerSettings.OverPoint
settings.xOffset = 0
settings.yOffset = 0
settings.quadOffset = quadrant
settings.angleOffset = 0
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
dom, root = self.layerToSld(layer)
# print("Label with " + label + " placement " + dom.toString())
self.assertStaticAnchorPoint(root, ax, ay)
def setLabelBufferSettings(self, layer, buffer):
settings = layer.labeling().settings()
format = settings.format()
format.setBuffer(buffer)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
def updateLayerLabelingFontStyle(self, layer, bold, italic, underline=False, strikeout=False):
settings = layer.labeling().settings()
format = settings.format()
font = format.font()
font.setBold(bold)
font.setItalic(italic)
font.setUnderline(underline)
font.setStrikeOut(strikeout)
format.setFont(font)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
def updateLayerLabelingUnit(self, layer, unit):
settings = layer.labeling().settings()
format = settings.format()
format.setSizeUnit(unit)
settings.setFormat(format)
layer.setLabeling(QgsVectorLayerSimpleLabeling(settings))
def loadStyleWithCustomProperties(self, layer, qmlFileName):
# load the style, only vector symbology
path = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), qmlFileName))
# labeling is in custom properties, they need to be loaded separately
status = layer.loadNamedStyle(path)
doc = QDomDocument()
file = QFile(path)
file.open(QIODevice.ReadOnly)
doc.setContent(file, True)
file.close()
flag = layer.readCustomProperties(doc.documentElement())
def assertPointPlacement(self, textSymbolizer):
labelPlacement = self.assertElement(textSymbolizer, 'se:LabelPlacement', 0)
self.assertIsNone(self.assertElement(labelPlacement, 'se:LinePlacement', 0, True))
pointPlacement = self.assertElement(labelPlacement, 'se:PointPlacement', 0)
return pointPlacement
def assertLinePlacement(self, textSymbolizer):
labelPlacement = self.assertElement(textSymbolizer, 'se:LabelPlacement', 0)
self.assertIsNone(self.assertElement(labelPlacement, 'se:PointPlacement', 0, True))
linePlacement = self.assertElement(labelPlacement, 'se:LinePlacement', 0)
return linePlacement
def assertElement(self, container, elementName, index, allowMissing=False):
list = container.elementsByTagName(elementName)
if list.size() <= index:
if allowMissing:
return None
else:
self.fail('Expected to find at least ' + str(
index + 1) + ' ' + elementName + ' in ' + container.nodeName() + ' but found ' + str(list.size()))
node = list.item(index)
        self.assertTrue(node.isElement(), "Found node but it's not an element")
return node.toElement()
def getRule(self, root, ruleIndex):
rule = self.assertElement(root, 'se:Rule', ruleIndex)
return rule
def getTextSymbolizer(self, root, ruleIndex, textSymbolizerIndex):
rule = self.assertElement(root, 'se:Rule', ruleIndex)
textSymbolizer = self.assertElement(rule, 'se:TextSymbolizer', textSymbolizerIndex)
return textSymbolizer
def assertPropertyName(self, root, containerProperty, expectedAttributeName):
container = root.elementsByTagName(containerProperty).item(0).toElement()
property = container.elementsByTagName("ogc:PropertyName").item(0).toElement()
self.assertEqual(expectedAttributeName, property.text())
def assertSvgParameter(self, container, expectedName, allowMissing=False):
list = container.elementsByTagName("se:SvgParameter")
for i in range(0, list.size()):
item = list.item(i)
            if item.isElement() and item.toElement().attribute('name') == expectedName:
return item.toElement()
if allowMissing:
return None
else:
self.fail('Could not find a se:SvgParameter named ' + expectedName + ' in ' + container.nodeName())
def assertVendorOption(self, container, expectedName, allowMissing=False):
list = container.elementsByTagName("se:VendorOption")
for i in range(0, list.size()):
item = list.item(i)
            if item.isElement() and item.toElement().attribute('name') == expectedName:
return item.toElement()
if allowMissing:
return None
else:
self.fail('Could not find a se:VendorOption named ' + expectedName + ' in ' + container.nodeName())
def testRuleBaseEmptyFilter(self):
layer = QgsVectorLayer("Point", "addfeat", "memory")
mFilePath = QDir.toNativeSeparators('%s/symbol_layer/%s.qml' % (unitTestDataPath(), "categorizedEmptyValue"))
status = layer.loadNamedStyle(mFilePath) # NOQA
dom, root = self.layerToSld(layer)
# print("Rule based, with last rule checking against empty value:" + dom.toString())
# get the third rule
rule = root.elementsByTagName('se:Rule').item(2).toElement()
filter = rule.elementsByTagName('Filter').item(0).toElement()
filter = filter.firstChild().toElement()
self.assertEqual("ogc:Or", filter.nodeName())
self.assertEqual(1, filter.elementsByTagName('ogc:PropertyIsEqualTo').size())
self.assertEqual(1, filter.elementsByTagName('ogc:PropertyIsNull').size())
def assertScaleDenominator(self, root, expectedMinScale, expectedMaxScale, index=0):
rule = root.elementsByTagName('se:Rule').item(index).toElement()
if expectedMinScale:
minScale = rule.elementsByTagName('se:MinScaleDenominator').item(0)
self.assertEqual(expectedMinScale, minScale.firstChild().nodeValue())
else:
self.assertEqual(0, root.elementsByTagName('se:MinScaleDenominator').size())
if expectedMaxScale:
maxScale = rule.elementsByTagName('se:MaxScaleDenominator').item(0)
self.assertEqual(expectedMaxScale, maxScale.firstChild().nodeValue())
else:
self.assertEqual(0, root.elementsByTagName('se:MaxScaleDenominator').size())
def assertDashPattern(self, root, svgParameterIdx, expectedPattern):
        dashParameter = root.elementsByTagName(
            'se:SvgParameter').item(svgParameterIdx)
        svgParameterName = dashParameter.attributes().namedItem('name')
        self.assertEqual("stroke-dasharray", svgParameterName.nodeValue())
        self.assertEqual(
            expectedPattern, dashParameter.firstChild().nodeValue())
def assertStaticGap(self, root, expectedValue):
        # Check the gap element is a literal, not an expression
        gap = root.elementsByTagName('se:Gap').item(0)
        literal = gap.firstChild()
self.assertEqual("ogc:Literal", literal.nodeName())
self.assertEqual(expectedValue, literal.firstChild().nodeValue())
def assertStaticSize(self, root, expectedValue):
size = root.elementsByTagName('se:Size').item(0)
self.assertEqual(expectedValue, size.firstChild().nodeValue())
def assertExternalGraphic(self, root, index, expectedLink, expectedFormat):
graphic = root.elementsByTagName('se:ExternalGraphic').item(index)
onlineResource = graphic.firstChildElement('se:OnlineResource')
self.assertEqual(expectedLink, onlineResource.attribute('xlink:href'))
format = graphic.firstChildElement('se:Format')
self.assertEqual(expectedFormat, format.firstChild().nodeValue())
def assertStaticPerpendicularOffset(self, root, expectedValue):
offset = root.elementsByTagName('se:PerpendicularOffset').item(0)
self.assertEqual(expectedValue, offset.firstChild().nodeValue())
def assertWellKnownMark(self, root, index, expectedName, expectedFill, expectedStroke, expectedStrokeWidth):
mark = root.elementsByTagName('se:Mark').item(index)
wkn = mark.firstChildElement('se:WellKnownName')
self.assertEqual(expectedName, wkn.text())
fill = mark.firstChildElement('se:Fill')
if expectedFill is None:
self.assertTrue(fill.isNull())
else:
parameter = fill.firstChildElement('se:SvgParameter')
self.assertEqual('fill', parameter.attribute('name'))
self.assertEqual(expectedFill, parameter.text())
stroke = mark.firstChildElement('se:Stroke')
if expectedStroke is None:
self.assertTrue(stroke.isNull())
else:
parameter = stroke.firstChildElement('se:SvgParameter')
self.assertEqual('stroke', parameter.attribute('name'))
self.assertEqual(expectedStroke, parameter.text())
parameter = parameter.nextSiblingElement('se:SvgParameter')
self.assertEqual('stroke-width', parameter.attribute('name'))
self.assertEqual(str(expectedStrokeWidth), parameter.text())
def assertStaticRotation(self, root, expectedValue, index=0):
        # Check the rotation element is a literal, not an expression
rotation = root.elementsByTagName('se:Rotation').item(index)
literal = rotation.firstChild()
self.assertEqual("ogc:Literal", literal.nodeName())
self.assertEqual(expectedValue, literal.firstChild().nodeValue())
    def assertStaticDisplacement(self, root, expectedDispX, expectedDispY):
        displacement = root.elementsByTagName('se:Displacement').item(0)
        self.assertIsNotNone(displacement)
        dx = displacement.firstChild()
        self.assertIsNotNone(dx)
        self.assertEqual("se:DisplacementX", dx.nodeName())
        self.assertSldNumber(expectedDispX, dx.firstChild().nodeValue())
        dy = displacement.lastChild()
        self.assertIsNotNone(dy)
        self.assertEqual("se:DisplacementY", dy.nodeName())
        self.assertSldNumber(expectedDispY, dy.firstChild().nodeValue())
    def assertStaticAnchorPoint(self, root, expectedAnchorX, expectedAnchorY):
        anchor = root.elementsByTagName('se:AnchorPoint').item(0)
        self.assertIsNotNone(anchor)
        ax = anchor.firstChild()
        self.assertIsNotNone(ax)
        self.assertEqual("se:AnchorPointX", ax.nodeName())
        self.assertSldNumber(expectedAnchorX, ax.firstChild().nodeValue())
        ay = anchor.lastChild()
        self.assertIsNotNone(ay)
        self.assertEqual("se:AnchorPointY", ay.nodeName())
        self.assertSldNumber(expectedAnchorY, ay.firstChild().nodeValue())
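    # Reference fragment (illustrative only; element names and sample values
    # are taken from the assertions above, not from a captured export) of the
    # SLD structure the two helpers above walk:
    #
    #   <se:PointPlacement>
    #     <se:AnchorPoint>
    #       <se:AnchorPointX>0</se:AnchorPointX>
    #       <se:AnchorPointY>0.5</se:AnchorPointY>
    #     </se:AnchorPoint>
    #     <se:Displacement>
    #       <se:DisplacementX>4.95</se:DisplacementX>
    #       <se:DisplacementY>4.95</se:DisplacementY>
    #     </se:Displacement>
    #   </se:PointPlacement>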
def assertSldNumber(self, expected, stringValue):
value = float(stringValue)
self.assertFloatEquals(expected, value, 0.01)
def assertFloatEquals(self, expected, actual, tol):
        self.assertLess(abs(expected - actual), tol, 'Expected %f but was %f' % (expected, actual))
def assertStrokeWidth(self, root, svgParameterIdx, expectedWidth):
strokeWidth = root.elementsByTagName(
'se:SvgParameter').item(svgParameterIdx)
svgParameterName = strokeWidth.attributes().namedItem('name')
self.assertEqual("stroke-width", svgParameterName.nodeValue())
self.assertSldNumber(
expectedWidth, strokeWidth.firstChild().nodeValue())
def symbolToSld(self, symbolLayer):
dom = QDomDocument()
root = dom.createElement("FakeRoot")
dom.appendChild(root)
symbolLayer.toSld(dom, root, {})
return dom, root
def layerToSld(self, mapLayer):
dom = QDomDocument()
root = dom.createElement("FakeRoot")
dom.appendChild(root)
error = None
mapLayer.writeSld(root, dom, error, {})
return dom, root
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
yoer/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/modules/root.mako.py | 31 | 4279 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1357242050.211483
_template_filename=u'templates/root.mako'
_template_uri=u'root.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
_exports = ['css_link', 'pre', 'post', 'css']
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
def pre():
return render_pre(context.locals_(__M_locals))
self = context.get('self', UNDEFINED)
set = context.get('set', UNDEFINED)
def post():
return render_post(context.locals_(__M_locals))
next = context.get('next', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
self.seen_css = set()
__M_writer(u'\n')
# SOURCE LINE 7
__M_writer(u'\n')
# SOURCE LINE 10
__M_writer(u'\n')
# SOURCE LINE 15
__M_writer(u'\n')
# SOURCE LINE 22
__M_writer(u'\n')
# SOURCE LINE 25
__M_writer(u'<html>\n<head><title>IDP test login</title>\n ')
# SOURCE LINE 27
__M_writer(unicode(self.css()))
__M_writer(u'\n <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />\n</head>\n<body>\n ')
# SOURCE LINE 31
__M_writer(unicode(pre()))
__M_writer(u'\n')
# SOURCE LINE 34
__M_writer(unicode(next.body()))
__M_writer(u'\n')
# SOURCE LINE 35
__M_writer(unicode(post()))
__M_writer(u'\n</body>\n</html>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_css_link(context,path,media=''):
context.caller_stack._push_frame()
try:
context._push_buffer()
self = context.get('self', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(u'\n')
# SOURCE LINE 3
if path not in self.seen_css:
# SOURCE LINE 4
__M_writer(u' <link rel="stylesheet" type="text/css" href="')
__M_writer(filters.html_escape(unicode(path)))
__M_writer(u'" media="')
__M_writer(unicode(media))
__M_writer(u'">\n')
pass
# SOURCE LINE 6
__M_writer(u' ')
self.seen_css.add(path)
__M_writer(u'\n')
finally:
__M_buf, __M_writer = context._pop_buffer_and_writer()
context.caller_stack._pop_frame()
__M_writer(filters.trim(__M_buf.getvalue()))
return ''
def render_pre(context):
context.caller_stack._push_frame()
try:
context._push_buffer()
__M_writer = context.writer()
# SOURCE LINE 11
__M_writer(u'\n <div class="header">\n <h1><a href="/">Login</a></h1>\n </div>\n')
finally:
__M_buf, __M_writer = context._pop_buffer_and_writer()
context.caller_stack._pop_frame()
__M_writer(filters.trim(__M_buf.getvalue()))
return ''
def render_post(context):
context.caller_stack._push_frame()
try:
context._push_buffer()
__M_writer = context.writer()
# SOURCE LINE 16
        __M_writer(u'\n <div>\n <div class="footer">\n <p>© Copyright 2011 Umeå Universitet </p>\n </div>\n </div>\n')
finally:
__M_buf, __M_writer = context._pop_buffer_and_writer()
context.caller_stack._pop_frame()
__M_writer(filters.trim(__M_buf.getvalue()))
return ''
def render_css(context):
context.caller_stack._push_frame()
try:
context._push_buffer()
def css_link(path,media=''):
return render_css_link(context,path,media)
__M_writer = context.writer()
# SOURCE LINE 8
__M_writer(u'\n ')
# SOURCE LINE 9
__M_writer(unicode(css_link('/css/main.css', 'screen')))
__M_writer(u'\n')
finally:
__M_buf, __M_writer = context._pop_buffer_and_writer()
context.caller_stack._pop_frame()
__M_writer(filters.trim(__M_buf.getvalue()))
return ''
| apache-2.0 |
knighton/mapreduce | mrdomino/map_one_shard.py | 1 | 4762 | import json
import math
import itertools
from os.path import join as path_join
from subprocess import Popen, PIPE
from mrdomino import logger, get_instance, protocol
from mrdomino.util import create_cmd, open_input
def each_input_line(input_files, shard, n_shards):
# assign slices of each file to shards.
slice_assignments = []
for i in range(n_shards):
slice_assignments += [i] * len(input_files)
# get which files this shard is using (partially or the whole file).
a = len(input_files) * shard / n_shards
z = len(input_files) * (shard + 1) / float(n_shards)
z = int(math.ceil(z))
# for each input file, yield the slices we want from it.
for i in range(a, z):
aa = n_shards * i
zz = n_shards * (i + 1)
assign = slice_assignments[aa:zz]
inf_gen = itertools.cycle(range(n_shards))
with open_input(input_files[i], 'r') as fh:
for j, line in itertools.izip(inf_gen, fh):
if shard == assign[j]:
yield line
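# Worked example (added for illustration; not executed anywhere): with
# input_files=['input.txt'] and n_shards=2, slice_assignments becomes [0, 1]
# and both shards scan the single file -- shard 0 keeps the even-numbered
# lines while shard 1 keeps the odd-numbered ones:
#
#   even_lines = list(each_input_line(['input.txt'], 0, 2))
#   odd_lines = list(each_input_line(['input.txt'], 1, 2))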
def map(shard, args):
# find the map function.
job = get_instance(args)
step = job.get_step(args.step_idx)
map_func = step.mapper
n_shards = step.n_mappers
combine_func = step.combiner
assert 0 <= shard < n_shards
if combine_func is None:
out_fn = path_join(args.work_dir, args.output_prefix + '.%d' % shard)
logger.info("mapper {}: output -> {}".format(shard, out_fn))
proc_sort = Popen(['sort', '-o', out_fn], bufsize=4096, stdin=PIPE)
proc = proc_sort
else:
cmd_opts = ['python', '-m', 'mrdomino.combine',
'--job_module', args.job_module,
'--job_class', args.job_class,
'--step_idx', str(args.step_idx),
'--work_dir', args.work_dir,
'--output_prefix', args.output_prefix,
'--shard', str(shard)]
logger.info("mapper {}: starting combiner: {}"
.format(shard, create_cmd(cmd_opts)))
proc_combine = Popen(cmd_opts, bufsize=4096, stdin=PIPE)
proc_sort = Popen(['sort'], bufsize=4096, stdin=PIPE,
stdout=proc_combine.stdin)
proc = proc_combine
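    # Pipeline sketch (comment added for clarity): with a combiner configured,
    # mapper output flows  json lines -> sort -> `python -m mrdomino.combine`,
    # and `proc` points at the final stage so the communicate() call further
    # down waits for the whole chain to drain.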
if args.step_idx == 0:
# first step
if job.INPUT_PROTOCOL == protocol.JSONProtocol:
unpack_tuple = True
elif job.INPUT_PROTOCOL == protocol.JSONValueProtocol:
unpack_tuple = False
else:
raise ValueError("unsupported protocol: {}"
.format(job.INPUT_PROTOCOL))
elif args.step_idx > 0:
# intermediate step
if job.INTERNAL_PROTOCOL == protocol.JSONProtocol:
unpack_tuple = True
elif job.INTERNAL_PROTOCOL == protocol.JSONValueProtocol:
unpack_tuple = False
else:
raise ValueError("unsupported protocol: {}"
.format(job.INTERNAL_PROTOCOL))
else:
raise ValueError("step_idx={} cannot be negative"
.format(args.step_idx))
# process each line of input and sort for the merge step.
    # using a with block here ensures that proc_sort.stdin is closed on exit
    # and that it won't block the pipeline
count_written = 0
count_seen = 0
with proc_sort.stdin as in_fh:
for line in each_input_line(args.input_files, shard, n_shards):
count_seen += 1
kv = json.loads(line)
k, v = kv if unpack_tuple else (None, kv)
for kv in map_func(k, v):
in_fh.write(json.dumps(kv) + '\n')
count_written += 1
counters = job._counters
counters.incr("mapper", "seen", count_seen)
counters.incr("mapper", "written", count_written)
# write out the counters to file.
f = path_join(args.work_dir, 'map.counters.%d' % shard)
logger.info("mapper {}: counters -> {}".format(shard, f))
with open(f, 'w') as fh:
fh.write(counters.serialize())
# write how many entries were written for reducer balancing purposes.
# note that if combiner is present, we delegate this responsibility to it.
if combine_func is None:
f = path_join(args.work_dir, args.output_prefix + '_count.%d' % shard)
logger.info("mapper {}: lines written -> {}".format(shard, f))
with open(f, 'w') as fh:
fh.write(str(count_written))
# `communicate' will wait for subprocess to terminate
comb_stdout, comb_stderr = proc.communicate()
# finally note that we are done.
f = path_join(args.work_dir, 'map.done.%d' % shard)
logger.info("mapper {}: done -> {}".format(shard, f))
with open(f, 'w') as fh:
fh.write('')
| mit |
jhawkesworth/ansible-modules-core | files/xattr.py | 39 | 6290 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: xattr
version_added: "1.3"
short_description: set/retrieve extended attributes
description:
    - Manages user-defined filesystem extended attributes. Requires that they are enabled
      on the target filesystem and that the setfattr/getfattr utilities are present.
options:
name:
required: true
default: None
aliases: ['path']
description:
- The full path of the file/object to get the facts of
key:
required: false
default: None
description:
- The name of a specific Extended attribute key to set/retrieve
value:
required: false
default: None
description:
      - The value to set the named key to. Supplying a value automatically sets C(state) to 'present'
state:
required: false
default: get
choices: [ 'read', 'present', 'all', 'keys', 'absent' ]
description:
- defines which state you want to do.
C(read) retrieves the current value for a C(key) (default)
C(present) sets C(name) to C(value), default if value is set
C(all) dumps all data
C(keys) retrieves all keys
C(absent) deletes the key
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- if yes, dereferences symlinks and sets/gets attributes on symlink target,
otherwise acts on symlink itself.
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Obtain the extended attributes of /etc/foo.conf
- xattr: name=/etc/foo.conf
# Sets the key 'foo' to value 'bar'
- xattr: path=/etc/foo.conf key=user.foo value=bar
# Removes the key 'foo'
- xattr: name=/etc/foo.conf key=user.foo state=absent
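# Dump all extended attributes, acting on the symlink itself rather than its
# target (illustrative example added here; the options are taken from the
# documentation above)
- xattr: name=/etc/foo.conf state=all follow=no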
'''
import operator
def get_xattr_keys(module,path,follow):
cmd = [ module.get_bin_path('getfattr', True) ]
    # prevents a warning; unclear why this isn't the default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
cmd.append(path)
return _run_xattr(module,cmd)
def get_xattr(module,path,key,follow):
cmd = [ module.get_bin_path('getfattr', True) ]
    # prevents a warning; unclear why this isn't the default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
if key is None:
cmd.append('-d')
else:
cmd.append('-n %s' % key)
cmd.append(path)
return _run_xattr(module,cmd,False)
def set_xattr(module,path,key,value,follow):
cmd = [ module.get_bin_path('setfattr', True) ]
if not follow:
cmd.append('-h')
cmd.append('-n %s' % key)
cmd.append('-v %s' % value)
cmd.append(path)
return _run_xattr(module,cmd)
def rm_xattr(module,path,key,follow):
cmd = [ module.get_bin_path('setfattr', True) ]
if not follow:
cmd.append('-h')
cmd.append('-x %s' % key)
cmd.append(path)
return _run_xattr(module,cmd,False)
def _run_xattr(module,cmd,check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception:
e = get_exception()
module.fail_json(msg="%s!" % e.strerror)
#result = {'raw': out}
result = {}
for line in out.splitlines():
if re.match("^#", line) or line == "":
pass
elif re.search('=', line):
(key, val) = line.split("=")
result[key] = val.strip('"')
else:
result[line] = ''
return result
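# Parsing sketch (illustrative; the sample output below is hand-written, not
# captured from a real run): given getfattr output such as
#
#   # file: etc/foo.conf
#   user.foo="bar"
#
# the '# file:' header is skipped by the re.match("^#", ...) branch above and
# the returned dict becomes {'user.foo': 'bar'}, with the quotes stripped.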
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['path'], type='path'),
key = dict(required=False, default=None, type='str'),
value = dict(required=False, default=None, type='str'),
state = dict(required=False, default='read', choices=[ 'read', 'present', 'all', 'keys', 'absent' ], type='str'),
follow = dict(required=False, type='bool', default=True),
),
supports_check_mode=True,
)
path = module.params.get('name')
key = module.params.get('key')
value = module.params.get('value')
state = module.params.get('state')
follow = module.params.get('follow')
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
changed=False
msg = ""
res = {}
if key is None and state in ['present','absent']:
module.fail_json(msg="%s needs a key parameter" % state)
# All xattr must begin in user namespace
if key is not None and not re.match('^user\.',key):
key = 'user.%s' % key
if (state == 'present' or value is not None):
current=get_xattr(module,path,key,follow)
if current is None or not key in current or value != current[key]:
if not module.check_mode:
res = set_xattr(module,path,key,value,follow)
changed=True
res=current
msg="%s set to %s" % (key, value)
elif state == 'absent':
current=get_xattr(module,path,key,follow)
if current is not None and key in current:
if not module.check_mode:
res = rm_xattr(module,path,key,follow)
changed=True
res=current
msg="%s removed" % (key)
elif state == 'keys':
res=get_xattr_keys(module,path,follow)
msg="returning all keys"
elif state == 'all':
res=get_xattr(module,path,None,follow)
msg="dumping all"
else:
res=get_xattr(module,path,key,follow)
msg="returning %s" % key
module.exit_json(changed=changed, msg=msg, xattr=res)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
tic-ull/defensatfc-proto | tfc_webapps/packages/suds-timestamp/suds/bindings/__init__.py | 1 | 1399 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides modules containing classes to support Web Services (SOAP)
bindings.
"""
class xlstr(unicode):
"""
Language aware string contains a I{lang} attribute.
@ivar lang: The string language when set (may be None).
@type lang: str
"""
__slots__ = ('lang',)
@classmethod
def string(cls, s, lang=None):
return xlstr(s, lang=lang)
def __new__(cls, *args, **kwargs):
lang = kwargs.pop('lang', None)
res = super(xlstr, cls).__new__(cls, *args, **kwargs)
res.lang = lang
return res | agpl-3.0 |
mkret2/root | tutorials/pyroot/hsum.py | 7 | 2238 | ## \file
## \ingroup tutorial_pyroot
## Simple example filling histograms in a loop and updating the graphics display.
##
## \macro_image
## \macro_code
##
## \author Wim Lavrijsen
from ROOT import TCanvas, TH1F, TSlider
from ROOT import gROOT, gBenchmark, gRandom
# Create a new canvas, and customize it.
c1 = TCanvas( 'c1', 'The HSUM example', 200, 10, 600, 400 )
c1.SetGrid();
gBenchmark.Start( 'hsum' )
# Create some histograms.
total = TH1F( 'total', 'This is the total distribution', 100, -4, 4 )
main = TH1F( 'main', 'Main contributor', 100, -4, 4 )
s1 = TH1F( 's1', 'This is the first signal', 100, -4, 4 )
s2 = TH1F( 's2', 'This is the second signal', 100, -4, 4 )
total.Sumw2() # this makes sure that the sum of squares of weights will be stored
# Set canvas/frame attributes.
total.SetMarkerStyle( 21 )
total.SetMarkerSize( 0.7 )
main.SetFillColor( 16 )
s1.SetFillColor( 42 )
s2.SetFillColor( 46 )
# Initialize random number generator.
gRandom.SetSeed()
gauss, landau = gRandom.Gaus, gRandom.Landau
# for speed, bind and cache the Fill member functions
histos = [ 'total', 'main', 's1', 's2' ]
for name in histos:
exec '%sFill = %s.Fill' % (name,name)
# Fill histograms randomly
kUPDATE = 500
for i in xrange( 10000 ):
# Generate random values.
xmain = gauss( -1, 1.5 )
xs1 = gauss( -0.5, 0.5 )
xs2 = landau( 1, 0.15 )
mainFill( xmain )
# Fill histograms.
s1Fill( xs1, 0.3 )
s2Fill( xs2, 0.2 )
totalFill( xmain )
totalFill( xs1, 0.3 )
totalFill( xs2, 0.2 )
# Update display every kUPDATE events.
if i and (i%kUPDATE) == 0 :
if i == kUPDATE :
total.Draw( 'e1p' )
main.Draw( 'same' )
s1.Draw( 'same' )
s2.Draw( 'same' )
c1.Update()
slider = TSlider( 'slider', 'test', 4.2, 0, 4.6, total.GetMaximum(), 38 )
slider.SetFillColor( 46 )
if slider:
slider.SetRange( 0, float(i) / 10000. )
c1.Modified()
c1.Update()
# Destroy member functions cache.
for name in histos:
exec 'del %sFill' % name
del histos
# Done, finalized and trigger an update.
slider.SetRange( 0, 1 )
total.Draw( 'sameaxis' ) # to redraw axis hidden by the fill area
c1.Modified()
c1.Update()
gBenchmark.Show( 'hsum' )
| lgpl-2.1 |
ar45/mysql-connector-python | tests/test_network.py | 7 | 17515 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysql.connector.network
"""
import os
import socket
import logging
from collections import deque
import unittest
import tests
from mysql.connector import (network, errors, constants)
LOGGER = logging.getLogger(tests.LOGGER_NAME)
class NetworkTests(tests.MySQLConnectorTests):
"""Testing mysql.connector.network functions"""
def test__prepare_packets(self):
"""Prepare packets for sending"""
data = (b'abcdefghijklmn', 1)
exp = [b'\x0e\x00\x00\x01abcdefghijklmn']
self.assertEqual(exp, network._prepare_packets(*(data)))
data = (b'a' * (constants.MAX_PACKET_LENGTH + 1000), 2)
exp = [
b'\xff\xff\xff\x02' + (b'a' * constants.MAX_PACKET_LENGTH),
b'\xe8\x03\x00\x03' + (b'a' * 1000)
]
self.assertEqual(exp, network._prepare_packets(*(data)))
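# Header layout sketch (comment added for clarity; values mirror the expected
# bytes above): each wire packet is
#
#   <3-byte little-endian payload length><1-byte sequence number><payload>
#
# so b'\x0e\x00\x00\x01' announces a 14-byte payload as packet #1, and
# b'\xe8\x03\x00\x03' a 1000-byte payload as packet #3. Payloads longer than
# MAX_PACKET_LENGTH (0xffffff) are split, hence the two packets expected in
# the second case.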
class BaseMySQLSocketTests(tests.MySQLConnectorTests):
"""Testing mysql.connector.network.BaseMySQLSocket"""
def setUp(self):
config = tests.get_mysql_config()
self._host = config['host']
self._port = config['port']
self.cnx = network.BaseMySQLSocket()
def tearDown(self):
try:
self.cnx.close_connection()
except:
pass
def _get_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
LOGGER.debug("Get socket for {host}:{port}".format(
host=self._host, port=self._port))
sock.connect((self._host, self._port))
return sock
def test_init(self):
"""MySQLSocket initialization"""
exp = {
'sock': None,
'_connection_timeout': None,
'_packet_queue': deque(),
'recvsize': 1024 * 8,
}
for key, value in exp.items():
self.assertEqual(value, self.cnx.__dict__[key])
def test_next_packet_number(self):
"""Test packet number property"""
self.assertEqual(0, self.cnx.next_packet_number)
self.assertEqual(0, self.cnx._packet_number)
self.assertEqual(1, self.cnx.next_packet_number)
self.assertEqual(1, self.cnx._packet_number)
self.cnx._packet_number = 255
self.assertEqual(0, self.cnx.next_packet_number)
def test_open_connection(self):
"""Opening a connection"""
self.assertRaises(NotImplementedError, self.cnx.open_connection)
def test_get_address(self):
"""Get the address of a connection"""
self.assertRaises(NotImplementedError, self.cnx.get_address)
def test_shutdown(self):
"""Shutting down a connection"""
self.cnx.shutdown()
self.assertEqual(None, self.cnx.sock)
def test_close_connection(self):
"""Closing a connection"""
self.cnx.close_connection()
self.assertEqual(None, self.cnx.sock)
def test_send_plain(self):
"""Send plain data through the socket"""
data = b'asddfasdfasdf'
self.assertRaises(errors.OperationalError, self.cnx.send_plain,
data, 0)
self.cnx.sock = tests.DummySocket()
data = [
(b'\x03\x53\x45\x4c\x45\x43\x54\x20\x22\x61\x62\x63\x22', 1),
(b'\x03\x53\x45\x4c\x45\x43\x54\x20\x22'
+ (b'\x61' * (constants.MAX_PACKET_LENGTH + 1000)) + b'\x22', 2)]
self.assertRaises(Exception, self.cnx.send_plain, None, None)
for value in data:
exp = network._prepare_packets(*value)
try:
self.cnx.send_plain(*value)
except errors.Error as err:
self.fail("Failed sending pktnr {}: {}".format(value[1],
str(err)))
self.assertEqual(exp, self.cnx.sock._client_sends)
self.cnx.sock.reset()
def test_send_compressed(self):
"""Send compressed data through the socket"""
data = b'asddfasdfasdf'
self.assertRaises(errors.OperationalError, self.cnx.send_compressed,
data, 0)
self.cnx.sock = tests.DummySocket()
self.assertRaises(Exception, self.cnx.send_compressed, None, None)
# Small packet
data = (b'\x03\x53\x45\x4c\x45\x43\x54\x20\x22\x61\x62\x63\x22', 1)
exp = [b'\x11\x00\x00\x00\x00\x00\x00\r\x00\x00\x01\x03SELECT "abc"']
try:
self.cnx.send_compressed(*data)
except errors.Error as err:
self.fail("Failed sending pktnr {}: {}".format(data[1], err))
self.assertEqual(exp, self.cnx.sock._client_sends)
self.cnx.sock.reset()
# Slightly bigger packet (not getting compressed)
data = (b'\x03\x53\x45\x4c\x45\x43\x54\x20\x22\x61\x62\x63\x22', 1)
exp = (24, b'\x11\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x01\x03'
b'\x53\x45\x4c\x45\x43\x54\x20\x22')
try:
self.cnx.send_compressed(*data)
except errors.Error as err:
self.fail("Failed sending pktnr {}: {}".format(data[1], str(err)))
received = self.cnx.sock._client_sends[0]
self.assertEqual(exp, (len(received), received[:20]))
self.cnx.sock.reset()
# Big packet
data = (b'\x03\x53\x45\x4c\x45\x43\x54\x20\x22'
+ b'\x61' * (constants.MAX_PACKET_LENGTH + 1000) + b'\x22', 2)
exp = [
(63, b'\x38\x00\x00\x00\x00\x40\x00\x78\x9c\xed\xc1\x31'
b'\x0d\x00\x20\x0c\x00\xb0\x04\x8c'),
(16322, b'\xbb\x3f\x00\x01\xf9\xc3\xff\x78\x9c\xec\xc1\x81'
b'\x00\x00\x00\x00\x80\x20\xd6\xfd')]
try:
self.cnx.send_compressed(*data)
except errors.Error as err:
self.fail("Failed sending pktnr {}: {}".format(data[1], str(err)))
received = [(len(r), r[:20]) for r in self.cnx.sock._client_sends]
self.assertEqual(exp, received)
self.cnx.sock.reset()
def test_recv_plain(self):
"""Receive data from the socket"""
self.cnx.sock = tests.DummySocket()
def get_address():
return 'dummy'
self.cnx.get_address = get_address
# Receive a packet which is not 4 bytes long
self.cnx.sock.add_packet(b'\01\01\01')
self.assertRaises(errors.InterfaceError, self.cnx.recv_plain)
# Socket fails to receive and produces an error
self.cnx.sock.raise_socket_error()
self.assertRaises(errors.OperationalError, self.cnx.recv_plain)
# Receive packets after a query, SELECT "Ham"
exp = [
b'\x01\x00\x00\x01\x01',
b'\x19\x00\x00\x02\x03\x64\x65\x66\x00\x00\x00\x03\x48\x61\x6d\x00'
b'\x0c\x21\x00\x09\x00\x00\x00\xfd\x01\x00\x1f\x00\x00',
b'\x05\x00\x00\x03\xfe\x00\x00\x02\x00',
b'\x04\x00\x00\x04\x03\x48\x61\x6d',
b'\x05\x00\x00\x05\xfe\x00\x00\x02\x00',
]
self.cnx.sock.reset()
self.cnx.sock.add_packets(exp)
length_exp = len(exp)
result = []
packet = self.cnx.recv_plain()
while packet:
result.append(packet)
if length_exp == len(result):
break
packet = self.cnx.recv_plain()
self.assertEqual(exp, result)
def test_recv_compressed(self):
"""Receive compressed data from the socket"""
self.cnx.sock = tests.DummySocket()
def get_address():
return 'dummy'
self.cnx.get_address = get_address
# Receive a packet which is not 7 bytes long
self.cnx.sock.add_packet(b'\01\01\01\01\01\01')
self.assertRaises(errors.InterfaceError, self.cnx.recv_compressed)
# Receive the header of a packet, but nothing more
self.cnx.sock.add_packet(b'\01\00\00\00\00\00\00')
self.assertRaises(errors.InterfaceError, self.cnx.recv_compressed)
# Socket fails to receive and produces an error
self.cnx.sock.raise_socket_error()
self.assertRaises(errors.OperationalError, self.cnx.recv_compressed)
# Receive result of query SELECT REPEAT('a',1*1024*1024), 'XYZ'
packets = (
b'\x80\x00\x00\x01\x00\x40\x00\x78\x9c\xed\xcb\xbd\x0a\x81\x01'
b'\x14\xc7\xe1\xff\xeb\xa5\x28\x83\x4d\x26\x99\x7c\x44\x21\x37'
b'\x60\xb0\x4b\x06\x6c\x0a\xd7\xe3\x7a\x15\x79\xc9\xec\x0a\x9e'
b'\x67\x38\x9d\xd3\xaf\x53\x24\x45\x6d\x96\xd4\xca\xcb\xf5\x96'
b'\xa4\xbb\xdb\x6c\x37\xeb\xfd\x68\x78\x1e\x4e\x17\x93\xc5\x7c'
b'\xb9\xfa\x8e\x71\xda\x83\xaa\xde\xf3\x28\xd2\x4f\x7a\x49\xf9'
b'\x7b\x28\x0f\xc7\xd3\x27\xb6\xaa\xfd\xf9\x8d\x8d\xa4\xfe\xaa'
b'\xae\x34\xd3\x69\x3c\x93\xce\x19\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xeb\x0d\xe7\xa5\x29\xb8',
b'\x05\x04\x00\x02\x68\xc0\x0f\x78\x9c\xed\xc1\x31\x01\x00\x00'
b'\x08\x03\xa0\xc3\x92\xea\xb7\xfe\x25\x8c\x60\x01\x20\x01' +
b'\x00' * 999 + b'\xe0\x53\x3d\x7b\x0a\x29\x40\x7b'
)
exp = [
b'\x01\x00\x00\x01\x02',
b'\x2d\x00\x00\x02\x03\x64\x65\x66\x00\x00\x00\x17\x52\x45\x50'
b'\x45\x41\x54\x28\x27\x61\x27\x2c\x31\x2a\x31\x30\x32\x34\x2a'
b'\x31\x30\x32\x34\x29\x00\x0c\x21\x00\x00\x00\x90\x00\xfa\x01'
b'\x00\x1f\x00\x00',
b'\x19\x00\x00\x03\x03\x64\x65\x66\x00\x00\x00\x03\x58\x59\x5a'
b'\x00\x0c\x21\x00\x09\x00\x00\x00\xfd\x01\x00\x1f\x00\x00',
b'\x05\x00\x00\x04\xfe\x00\x00\x00\x00',
b'\x08\x00\x10\x05\xfd\x00\x00\x10' +
b'\x61' * 1 * 1024 * 1024 + b'\x03\x58\x59\x5a'
]
self.cnx.sock.reset()
self.cnx.sock.add_packets(packets)
length_exp = len(exp)
packet = self.cnx.recv_compressed()
counter = 0
while packet and counter < length_exp:
self.assertEqual(exp[counter], packet)
packet = self.cnx.recv_compressed()
counter += 1
def test_set_connection_timeout(self):
"""Set the connection timeout"""
exp = 5
self.cnx.set_connection_timeout(exp)
self.assertEqual(exp, self.cnx._connection_timeout)
@unittest.skipIf(os.name == 'nt', "Skip UNIX Socket tests on Windows")
class MySQLUnixSocketTests(tests.MySQLConnectorTests):
"""Testing mysql.connector.network.MySQLUnixSocket"""
def setUp(self):
config = tests.get_mysql_config()
self._unix_socket = config['unix_socket']
self.cnx = network.MySQLUnixSocket(unix_socket=config['unix_socket'])
def tearDown(self):
try:
self.cnx.close_connection()
except:
pass
def test_init(self):
"""MySQLUnixSocket initialization"""
exp = {
'unix_socket': self._unix_socket,
}
for key, value in exp.items():
self.assertEqual(value, self.cnx.__dict__[key])
def test_get_address(self):
"""Get path to the Unix socket"""
exp = self._unix_socket
self.assertEqual(exp, self.cnx.get_address())
def test_open_connection(self):
"""Open a connection using a Unix socket"""
if os.name == 'nt':
self.assertRaises(errors.InterfaceError, self.cnx.open_connection)
else:
try:
self.cnx.open_connection()
except errors.Error as err:
self.fail(str(err))
@unittest.skipIf(not tests.SSL_AVAILABLE,
"Could not test switch to SSL. Make sure Python supports "
"SSL.")
def test_switch_to_ssl(self):
"""Switch the socket to use SSL"""
args = {
'ca': os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'),
'cert': os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'),
'key': os.path.join(tests.SSL_DIR, 'tests_client_key.pem'),
}
self.assertRaises(errors.InterfaceError,
self.cnx.switch_to_ssl, **args)
# Handshake failure
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect(self._unix_socket)
self.cnx.sock = sock
self.assertRaises(errors.InterfaceError,
self.cnx.switch_to_ssl, **args)
class MySQLTCPSocketTests(tests.MySQLConnectorTests):
"""Testing mysql.connector.network..MySQLTCPSocket"""
def setUp(self):
config = tests.get_mysql_config()
self._host = config['host']
self._port = config['port']
self.cnx = network.MySQLTCPSocket(host=self._host, port=self._port)
def tearDown(self):
try:
self.cnx.close_connection()
except:
pass
def test_init(self):
"""MySQLTCPSocket initialization"""
exp = {
'server_host': self._host,
'server_port': self._port,
}
for key, value in exp.items():
self.assertEqual(value, self.cnx.__dict__[key])
def test_get_address(self):
"""Get TCP/IP address"""
exp = "%s:%s" % (self._host, self._port)
self.assertEqual(exp, self.cnx.get_address())
@unittest.skipIf(tests.IPV6_AVAILABLE, "Testing IPv6, not testing IPv4")
def test_open_connection__ipv4(self):
"""Open a connection using TCP"""
try:
self.cnx.open_connection()
except errors.Error as err:
self.fail(str(err))
config = tests.get_mysql_config()
self._host = config['host']
self._port = config['port']
cases = [
# Address, Expected Family, Should Raise, Force IPv6
(tests.get_mysql_config()['host'], socket.AF_INET, False, False),
]
for case in cases:
self._test_open_connection(*case)
@unittest.skipIf(not tests.IPV6_AVAILABLE, "IPv6 testing disabled")
def test_open_connection__ipv6(self):
"""Open a connection using TCP"""
config = tests.get_mysql_config()
self._host = config['host']
self._port = config['port']
cases = [
# Address, Expected Family, Should Raise, Force IPv6
('::1', socket.AF_INET6, False, False),
('2001::14:06:77', socket.AF_INET6, True, False),
('xx:00:xx', socket.AF_INET6, True, False),
]
for case in cases:
self._test_open_connection(*case)
def _test_open_connection(self, addr, family, should_raise, force):
try:
sock = network.MySQLTCPSocket(host=addr,
port=self._port,
force_ipv6=force)
sock.set_connection_timeout(1)
sock.open_connection()
except (errors.InterfaceError, socket.error):
if not should_raise:
self.fail('{0} incorrectly raised socket.error'.format(
addr))
else:
if should_raise:
self.fail('{0} should have raised socket.error'.format(
addr))
else:
                self.assertEqual(family, sock._family,
                                 "Family for {0} did not match: expected "
                                 "{1}, got {2}".format(
                                     addr, family, sock._family))
sock.close_connection()
@unittest.skipIf(not tests.SSL_AVAILABLE,
"Could not test switch to SSL. Make sure Python supports "
"SSL.")
def test_switch_to_ssl(self):
"""Switch the socket to use SSL"""
args = {
'ca': os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'),
'cert': os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'),
'key': os.path.join(tests.SSL_DIR, 'tests_client_key.pem'),
}
self.assertRaises(errors.InterfaceError,
self.cnx.switch_to_ssl, **args)
# Handshake failure
(family, socktype, proto, _,
sockaddr) = socket.getaddrinfo(self._host, self._port)[0]
sock = socket.socket(family, socktype, proto)
sock.settimeout(4)
sock.connect(sockaddr)
self.cnx.sock = sock
self.assertRaises(errors.InterfaceError,
self.cnx.switch_to_ssl, **args)
| gpl-2.0 |
karllessard/tensorflow | tensorflow/lite/micro/examples/magic_wand/train/train.py | 12 | 7021 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=redefined-outer-name
# pylint: disable=g-bad-import-order
"""Build and train neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
from data_load import DataLoader
import numpy as np
import tensorflow as tf
logdir = "logs/scalars/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
def reshape_function(data, label):
reshaped_data = tf.reshape(data, [-1, 3, 1])
return reshaped_data, label
def calculate_model_size(model):
print(model.summary())
var_sizes = [
np.product(list(map(int, v.shape))) * v.dtype.size
for v in model.trainable_variables
]
print("Model size:", sum(var_sizes) / 1024, "KB")
def build_cnn(seq_length):
"""Builds a convolutional neural network in Keras."""
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(
8, (4, 3),
padding="same",
activation="relu",
input_shape=(seq_length, 3, 1)), # output_shape=(batch, 128, 3, 8)
tf.keras.layers.MaxPool2D((3, 3)), # (batch, 42, 1, 8)
tf.keras.layers.Dropout(0.1), # (batch, 42, 1, 8)
tf.keras.layers.Conv2D(16, (4, 1), padding="same",
activation="relu"), # (batch, 42, 1, 16)
tf.keras.layers.MaxPool2D((3, 1), padding="same"), # (batch, 14, 1, 16)
tf.keras.layers.Dropout(0.1), # (batch, 14, 1, 16)
tf.keras.layers.Flatten(), # (batch, 224)
tf.keras.layers.Dense(16, activation="relu"), # (batch, 16)
tf.keras.layers.Dropout(0.1), # (batch, 16)
tf.keras.layers.Dense(4, activation="softmax") # (batch, 4)
])
model_path = os.path.join("./netmodels", "CNN")
print("Built CNN.")
if not os.path.exists(model_path):
os.makedirs(model_path)
model.load_weights("./netmodels/CNN/weights.h5")
return model, model_path
def build_lstm(seq_length):
"""Builds an LSTM in Keras."""
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(22),
input_shape=(seq_length, 3)), # output_shape=(batch, 44)
tf.keras.layers.Dense(4, activation="sigmoid") # (batch, 4)
])
model_path = os.path.join("./netmodels", "LSTM")
print("Built LSTM.")
if not os.path.exists(model_path):
os.makedirs(model_path)
return model, model_path
def load_data(train_data_path, valid_data_path, test_data_path, seq_length):
data_loader = DataLoader(
train_data_path, valid_data_path, test_data_path, seq_length=seq_length)
data_loader.format()
return data_loader.train_len, data_loader.train_data, data_loader.valid_len, \
data_loader.valid_data, data_loader.test_len, data_loader.test_data
def build_net(args, seq_length):
if args.model == "CNN":
model, model_path = build_cnn(seq_length)
elif args.model == "LSTM":
model, model_path = build_lstm(seq_length)
  else:
    raise ValueError(
        "Invalid model name {!r}; expected 'CNN' or 'LSTM'.".format(args.model))
  return model, model_path
def train_net(
model,
model_path, # pylint: disable=unused-argument
train_len, # pylint: disable=unused-argument
train_data,
valid_len,
valid_data, # pylint: disable=unused-argument
test_len,
test_data,
kind):
"""Trains the model."""
calculate_model_size(model)
epochs = 50
batch_size = 64
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
if kind == "CNN":
train_data = train_data.map(reshape_function)
test_data = test_data.map(reshape_function)
valid_data = valid_data.map(reshape_function)
test_labels = np.zeros(test_len)
idx = 0
for data, label in test_data: # pylint: disable=unused-variable
test_labels[idx] = label.numpy()
idx += 1
train_data = train_data.batch(batch_size).repeat()
valid_data = valid_data.batch(batch_size)
test_data = test_data.batch(batch_size)
model.fit(
train_data,
epochs=epochs,
validation_data=valid_data,
steps_per_epoch=1000,
validation_steps=int((valid_len - 1) / batch_size + 1),
callbacks=[tensorboard_callback])
loss, acc = model.evaluate(test_data)
pred = np.argmax(model.predict(test_data), axis=1)
confusion = tf.math.confusion_matrix(
labels=tf.constant(test_labels),
predictions=tf.constant(pred),
num_classes=4)
print(confusion)
print("Loss {}, Accuracy {}".format(loss, acc))
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model to disk
open("model.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
# Save the model to disk
open("model_quantized.tflite", "wb").write(tflite_model)
basic_model_size = os.path.getsize("model.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("model_quantized.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m")
parser.add_argument("--person", "-p")
args = parser.parse_args()
seq_length = 128
print("Start to load data...")
if args.person == "true":
train_len, train_data, valid_len, valid_data, test_len, test_data = \
load_data("./person_split/train", "./person_split/valid",
"./person_split/test", seq_length)
else:
train_len, train_data, valid_len, valid_data, test_len, test_data = \
load_data("./data/train", "./data/valid", "./data/test", seq_length)
print("Start to build net...")
model, model_path = build_net(args, seq_length)
print("Start training...")
train_net(model, model_path, train_len, train_data, valid_len, valid_data,
test_len, test_data, args.model)
print("Training finished!")
| apache-2.0 |
O4siang/SDN_GUI | main.py | 1 | 1380 | # -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template, jsonify, request, g, abort, redirect, url_for
from rest import Rest
import json
app = Flask(__name__)
global_flow_table = {}
# mainpage
@app.route("/")
def main():
print "test"
sw_desc = Rest.get_switch_desc()
print sw_desc
sw_dpid_list = Rest.get_switch_list()
print sw_dpid_list
#if sw_desc is False or int(dpid) not in sw_dpid_list:
# sw_desc = "not connected"
for dpid in sw_dpid_list:
print dpid
dpid_t = dpid
flow_table = None
flow_table = Rest.get_flow_table(dpid)
global global_flow_table
global_flow_table = flow_table
port = Rest.get_switch_port(dpid)
print "test"
return render_template("index.html",sw_desc=sw_desc,dpid=dpid_t, port=port, flow_table=flow_table)
@app.route("/add", methods=['POST'])
def add():
usertable = {}
usertable["username"] = request.form['username']
usertable["mac"] = request.form['mac']
usertable["port"] = request.form['port']
print json.dumps(usertable)
Rest.add_user(json.dumps(usertable))
return redirect(url_for('main'))
@app.errorhandler(404)
def page_not_found(error):
return render_template("page_not_found.html"), 404
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5566, debug=False)
| mit |
moijes12/oh-mainline | vendor/packages/oauthlib/oauthlib/oauth2/rfc6749/endpoints/base.py | 87 | 1729 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import functools
import logging
from ..errors import TemporarilyUnavailableError, ServerError
from ..errors import FatalClientError, OAuth2Error
log = logging.getLogger(__name__)
class BaseEndpoint(object):
def __init__(self):
self._available = True
self._catch_errors = False
@property
def available(self):
return self._available
@available.setter
def available(self, available):
self._available = available
@property
def catch_errors(self):
return self._catch_errors
@catch_errors.setter
def catch_errors(self, catch_errors):
self._catch_errors = catch_errors
def catch_errors_and_unavailability(f):
@functools.wraps(f)
def wrapper(endpoint, uri, *args, **kwargs):
if not endpoint.available:
e = TemporarilyUnavailableError()
log.info('Endpoint unavailable, ignoring request %s.' % uri)
return {}, e.json, 503
if endpoint.catch_errors:
try:
return f(endpoint, uri, *args, **kwargs)
except OAuth2Error:
raise
except FatalClientError:
raise
except Exception as e:
error = ServerError()
log.warning(
'Exception caught while processing request, %s.' % e)
return {}, error.json, 500
else:
return f(endpoint, uri, *args, **kwargs)
return wrapper
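# Usage sketch (hypothetical endpoint, added for illustration):
#
#   class ExampleEndpoint(BaseEndpoint):
#       @catch_errors_and_unavailability
#       def create_response(self, uri, *args, **kwargs):
#           ...
#
# With endpoint.available set to False every decorated call short-circuits to
# the 503 TemporarilyUnavailableError above; with catch_errors enabled,
# unexpected exceptions are converted into a 500 ServerError instead of
# propagating.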
| agpl-3.0 |
tetravision/Test | stem/interpreter/__init__.py | 1 | 3979 | # Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interactive interpreter for interacting with Tor directly. This adds usability
features such as tab completion, history, and IRC-style functions (like /help).
"""
__all__ = [
'arguments',
'autocomplete',
'commands',
'help',
]
import os
import sys
import stem
import stem.connection
import stem.prereq
import stem.process
import stem.util.conf
import stem.util.system
import stem.util.term
from stem.util.term import Attr, Color, format
PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE)
STANDARD_OUTPUT = (Color.BLUE, )
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD)
HEADER_OUTPUT = (Color.GREEN, )
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD)
ERROR_OUTPUT = (Attr.BOLD, Color.RED)
settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path)
@uses_settings
def msg(message, config, **attr):
return config.get(message).format(**attr)
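# Usage sketch (illustrative; the keys shown are ones referenced later in
# main() and resolved against settings.cfg):
#
#   msg('msg.starting_tor')
#   msg('msg.tor_unavailable')
#
# Keyword arguments, when given, are applied to the configured message via
# str.format().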
def main():
import readline
import stem.interpreter.arguments
import stem.interpreter.autocomplete
import stem.interpreter.commands
try:
args = stem.interpreter.arguments.parse(sys.argv[1:])
except ValueError as exc:
print(exc)
sys.exit(1)
if args.print_help:
print(stem.interpreter.arguments.get_help())
sys.exit()
if args.disable_color:
global PROMPT
stem.util.term.DISABLE_COLOR_SUPPORT = True
PROMPT = '>>> '
# If the user isn't connecting to something in particular then offer to start
# tor if it isn't running.
if not (args.user_provided_port or args.user_provided_socket):
is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')
if not is_tor_running:
if not stem.util.system.is_available('tor'):
print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT))
sys.exit(1)
else:
print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))
control_port = '9051' if args.control_port == 'default' else str(args.control_port)
stem.process.launch_tor_with_config(
config = {
'SocksPort': '0',
'ControlPort': control_port,
'CookieAuthentication': '1',
'ExitPolicy': 'reject *:*',
},
completion_percent = 5,
take_ownership = True,
)
control_port = (args.control_address, args.control_port)
control_socket = args.control_socket
  # If the user explicitly specified an endpoint then just try to connect to
# that.
if args.user_provided_socket and not args.user_provided_port:
control_port = None
elif args.user_provided_port and not args.user_provided_socket:
control_socket = None
controller = stem.connection.connect(
control_port = control_port,
control_socket = control_socket,
password_prompt = True,
)
if controller is None:
sys.exit(1)
with controller:
autocompleter = stem.interpreter.autocomplete.Autocompleter(controller)
readline.parse_and_bind('tab: complete')
readline.set_completer(autocompleter.complete)
readline.set_completer_delims('\n')
interpreter = stem.interpreter.commands.ControlInterpretor(controller)
for line in msg('msg.startup_banner').splitlines():
line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT
print(format(line, *line_format))
print('')
while True:
try:
prompt = '... ' if interpreter.is_multiline_context else PROMPT
if stem.prereq.is_python_3():
user_input = input(prompt)
else:
user_input = raw_input(prompt)
response = interpreter.run_command(user_input)
if response is not None:
print(response)
except (KeyboardInterrupt, EOFError, stem.SocketClosed) as exc:
print('') # move cursor to the following line
break
| lgpl-3.0 |
dc3-plaso/dfvfs | tests/vfs/fvde_file_entry.py | 1 | 4808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using pyfvde."""
import unittest
from dfvfs.path import fvde_path_spec
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import tsk_partition_path_spec
from dfvfs.resolver import context
from dfvfs.resolver import resolver
from dfvfs.vfs import fvde_file_entry
from dfvfs.vfs import fvde_file_system
from tests import test_lib as shared_test_lib
@shared_test_lib.skipUnlessHasTestFile([u'fvdetest.qcow2'])
class FVDEFileEntryTest(shared_test_lib.BaseTestCase):
"""The unit test for the FVDE file entry object."""
_FVDE_PASSWORD = u'fvde-TEST'
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath([u'fvdetest.qcow2'])
path_spec = os_path_spec.OSPathSpec(location=test_file)
path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
path_spec = tsk_partition_path_spec.TSKPartitionPathSpec(
location=u'/p1', parent=path_spec)
self._fvde_path_spec = fvde_path_spec.FVDEPathSpec(parent=path_spec)
resolver.Resolver.key_chain.SetCredential(
self._fvde_path_spec, u'password', self._FVDE_PASSWORD)
self._file_system = fvde_file_system.FVDEFileSystem(self._resolver_context)
self._file_system.Open(self._fvde_path_spec)
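    # Note (added): the chain above stacks OS file -> QCOW2 image -> TSK
    # partition /p1 -> FVDE (CoreStorage) volume, and the password has to be
    # registered on the resolver key chain before Open() can unlock it.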
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._file_system.Close()
  def testInitialize(self):
"""Test the __init__ function."""
file_entry = fvde_file_entry.FVDEFileEntry(
self._resolver_context, self._file_system, self._fvde_path_spec)
self.assertIsNotNone(file_entry)
def testGetFileEntryByPathSpec(self):
"""Test the get a file entry by path specification functionality."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNone(parent_file_entry)
def testGetStat(self):
"""Tests the GetStat function."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
stat_object = file_entry.GetStat()
self.assertIsNotNone(stat_object)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
self.assertEqual(stat_object.size, 167772160)
def testIsFunctions(self):
"""Test the Is? functions."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsRoot())
self.assertTrue(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
"""Test the sub file entries iteration functionality."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 0)
expected_sub_file_entry_names = []
sub_file_entry_names = []
for sub_file_entry in file_entry.sub_file_entries:
sub_file_entry_names.append(sub_file_entry.name)
self.assertEqual(
len(sub_file_entry_names), len(expected_sub_file_entry_names))
self.assertEqual(
sorted(sub_file_entry_names), expected_sub_file_entry_names)
def testDataStreams(self):
"""Test the data streams functionality."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 1)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [u''])
def testGetDataStream(self):
"""Tests the GetDataStream function."""
file_entry = self._file_system.GetFileEntryByPathSpec(self._fvde_path_spec)
self.assertIsNotNone(file_entry)
data_stream_name = u''
data_stream = file_entry.GetDataStream(data_stream_name)
self.assertIsNotNone(data_stream)
self.assertEqual(data_stream.name, data_stream_name)
data_stream = file_entry.GetDataStream(u'bogus')
self.assertIsNone(data_stream)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
MenZil/kuma | vendor/packages/pygments/lexers/ruby.py | 72 | 22142 | # -*- coding: utf-8 -*-
"""
pygments.lexers.ruby
~~~~~~~~~~~~~~~~~~~~
Lexers for Ruby and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \
bygroups, default, LexerContext, do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic
from pygments.util import shebang_matches
__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer']
line_re = re.compile('.*?\n')
RUBY_OPERATORS = (
'*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~',
'[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '==='
)
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby', 'duby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
'*.rbx', '*.duby']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
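        # Illustrative mapping (comment added) for a heredoc opener such as
        #
        #     puts <<-"EOS" if cond
        #
        # group(1) == '<<-', group(2) and group(4) are the optional quotes
        # around the name, group(3) == 'EOS', and group(5) is the rest of the
        # line, which is lexed below before the heredoc body is emitted.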
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), Name.Constant, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), Name.Constant, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
(words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'([a-zA-Z_]\w*)(:)(?!:)',
bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
(lbrace, String.Regex, '#push'),
(rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other),
(r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end.*?$', Comment.Multiline),
# keywords
(words((
'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?',
'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo',
'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef',
'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'),
Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_]\w*'
r'(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'def(?=[*%&^`~+\-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(words((
'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader',
'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private',
'module_function', 'public', 'protected', 'true', 'false', 'nil'),
suffix=r'\b'),
Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(words((
'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include',
'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil',
'private_method_defined', 'protected_method_defined',
'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'),
Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(words((
'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort',
'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller',
'catch', 'chomp', 'chop', 'class_eval', 'class_variables',
'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set',
'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork',
'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub',
'hash', 'id', 'included_modules', 'inspect', 'instance_eval',
'instance_method', 'instance_methods',
'instance_variable_get', 'instance_variable_set', 'instance_variables',
'lambda', 'load', 'local_variables', 'loop',
'method', 'method_missing', 'methods', 'module_eval', 'name',
'object_id', 'open', 'p', 'print', 'printf', 'private_class_method',
'private_instance_methods',
'private_methods', 'proc', 'protected_instance_methods',
'protected_methods', 'public_class_method',
'public_instance_methods', 'public_methods',
'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require',
'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep',
'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint',
'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint',
'untrace_var', 'warn'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=(?:\s|;|\.)index\s)|'
r'(?<=(?:\s|;|\.)scan\s)|'
r'(?<=(?:\s|;|\.)sub\s)|'
r'(?<=(?:\s|;|\.)sub!\s)|'
r'(?<=(?:\s|;|\.)gsub\s)|'
r'(?<=(?:\s|;|\.)gsub!\s)|'
r'(?<=(?:\s|;|\.)match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z]\w+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(words(RUBY_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_]\w*[!?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_]\w*', Name.Class, '#pop'),
default('#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_]\w*', String.Interpol),
(r'#\$[a-zA-Z_]\w*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
_prompt_re = re.compile('irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] '
'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode)):
yield item
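# Illustrative sketch (an addition for clarity, not part of the original
# module): the console lexer above splits prompt prefixes from echoed code,
# delegating the code portions to RubyLexer via do_insertions(). A
# hypothetical session would tokenize roughly like this:
#     session = 'irb(main):001:0> a = 1\n=> 1\n'
#     for _, token, value in RubyConsoleLexer().get_tokens_unprocessed(session):
#         print(token, repr(value))
#     # 'irb(main):001:0> ' -> Generic.Prompt, 'a = 1\n' -> Ruby tokens,
#     # '=> 1\n' -> Generic.Output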
class FancyLexer(RegexLexer):
"""
Pygments Lexer For `Fancy <http://www.fancy-lang.org/>`_.
Fancy is a self-hosted, pure object-oriented, dynamic,
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
.. versionadded:: 1.5
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
aliases = ['fancy', 'fy']
mimetypes = ['text/x-fancysrc']
tokens = {
# copied from PerlLexer:
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\s+', Text),
# balanced delimiters (copied from PerlLexer):
(r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
# Comments
(r'#(.*?)\n', Comment.Single),
# Symbols
(r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
# Multi-line DoubleQuotedString
(r'"""(\\\\|\\"|[^"])*"""', String),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# keywords
(r'(def|class|try|catch|finally|retry|return|return_local|match|'
r'case|->|=>)\b', Keyword),
# constants
(r'(self|super|nil|false|true)\b', Name.Constant),
(r'[(){};,/?|:\\]', Punctuation),
# names
(words((
'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
Name.Builtin),
# functions
(r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
# operators, must be below functions
(r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
            (r'[A-Z]\w*', Name.Constant),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            (r'@@?', Operator),
            (r'[a-zA-Z_]\w*', Name),
# numbers - / checks are necessary to avoid mismarking regexes,
# see comment in RubyLexer
(r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
(r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
]
}
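# Illustrative self-test (an addition for clarity, not part of the original
# Pygments module): running this file directly tokenizes a small Ruby
# snippet, exercising the heredoc callback and interpolation states above.
if __name__ == '__main__':
    _demo = 'puts <<-EOS\n  #{1 + 1} bottles\nEOS\n'
    for _index, _token, _value in RubyLexer().get_tokens_unprocessed(_demo):
        print('%d %s %r' % (_index, _token, _value))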
| mpl-2.0 |
mohierf/mod-webui | module/plugins/minemap/minemap.py | 1 | 2420 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Mohier Frederic [email protected]
# Karfusehr Andreas, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.log import logger
# Will be populated by the UI with its own value
app = None
def show_minemap():
user = app.request.environ['USER']
    # Apply search filter if one exists ...
search = app.request.query.get('search', "type:host")
if "type:host" not in search:
search = "type:host " + search
logger.debug("[WebUI-minemap] search parameters '%s'", search)
items = app.datamgr.search_hosts_and_services(search, user)
logger.info("[WebUI-minemap] got %d matching items: %s", len(items), items)
# Fetch elements per page preference for user, default is 25
elts_per_page = app.prefs_module.get_ui_user_preference(user, 'elts_per_page', 25)
# We want to limit the number of elements
step = int(app.request.GET.get('step', elts_per_page))
start = int(app.request.GET.get('start', '0'))
end = int(app.request.GET.get('end', start + step))
    # If we overflow, fall back to the first page
total = len(items)
if start > total:
start = 0
end = step
navi = app.helper.get_navi(total, start, step=step)
return {'navi': navi, 'items': items[start:end], 'page': "minemap"}
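# Illustrative sketch (an addition for clarity, not part of the original
# plugin): the paging window computed above is a plain list slice; with
# hypothetical values it behaves like this:
#     total, start, step = 60, 50, 25
#     end = start + step
#     if start > total:
#         start, end = 0, step
#     window = items[start:end]   # items 50..59 on the last page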
def show_minemaps():
app.bottle.redirect("/minemap/all")
# Load plugin configuration parameters
# load_cfg()
pages = {
show_minemap: {
'name': 'Minemap', 'route': '/minemap', 'view': 'minemap', 'search_engine': True,
'static': True
},
show_minemaps: {
'name': 'Minemaps', 'route': '/minemaps', 'view': 'minemap',
'static': True
}
}
| agpl-3.0 |
Root-Box/external_chromium | testing/gmock/test/gmock_test_utils.py | 222 | 3579 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes Subprocess from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
# Exposes TestCase from gtest_test_utils.
TestCase = gtest_test_utils.TestCase
# pylint: enable-msg=C6409
def Main():
"""Runs the unit test."""
gtest_test_utils.Main()
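# Illustrative sketch (an addition for clarity, not part of the original
# file): GetExitStatus() normalizes the platform-specific os.system() return
# value described above, so callers can compare exit codes uniformly;
# 'some_test_binary' is a placeholder:
#     status = GetExitStatus(os.system('some_test_binary'))
#     assert status == 0, 'test binary failed'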
| bsd-3-clause |
f123h456/gumbo-parser | python/gumbo/html5lib_adapter.py | 12 | 5095 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Adapter between Gumbo and html5lib.
This exports one method, parse, with the same signature as html5lib.parse. It
takes the text to parse, and optionally an html5lib TreeBuilder to build the
tree, and gives back a DOM tree in that format. Example:
doc = parse(text, treebuilder='lxml')
"""
__author__ = '[email protected] (Jonathan Tang)'
import gumboc
# These should match html5lib.constants.namespaces, and be indexed by the enum
# values of gumboc.Namespace
_NAMESPACES = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/2000/svg',
'http://www.w3.org/1998/Math/MathML',
]
def _convert_doctype(treebuilder, source_node):
if not source_node.has_doctype:
# Mimic html5lib behavior: if no doctype token, no doctype node.
return
treebuilder.insertDoctype({
'name': source_node.name.decode('utf-8'),
'publicId': source_node.public_identifier.decode('utf-8'),
'systemId': source_node.system_identifier.decode('utf-8'),
})
def _convert_attributes(source_node):
def maybe_namespace(attr):
if attr.namespace != gumboc.AttributeNamespace.NONE:
return (repr(attr.namespace).lower() if attr.name != 'xmlns' else None,
attr.name.decode('utf-8'),
attr.namespace.to_url())
else:
return attr.name.decode('utf-8')
return dict((maybe_namespace(attr), attr.value.decode('utf-8'))
for attr in source_node.attributes)
def _convert_element(source_node):
  if source_node.type not in (gumboc.NodeType.ELEMENT, gumboc.NodeType.TEMPLATE):
# If-statement instead of assert so it runs with -O
raise AssertionError(
'_convert_element only works with elements; found %r' %
source_node.type)
return {
'name': source_node.v.element.tag_name.decode('utf-8'),
'namespace': _NAMESPACES[source_node.v.element.tag_namespace.value],
'data': _convert_attributes(source_node),
}
def _insert_root(treebuilder, source_node, pop_element=True):
treebuilder.insertRoot(_convert_element(source_node))
for child_node in source_node.children:
_insert_node(treebuilder, child_node)
if pop_element:
treebuilder.openElements.pop()
def _insert_node(treebuilder, source_node):
assert source_node.type != gumboc.NodeType.DOCUMENT
if source_node.type == gumboc.NodeType.COMMENT:
treebuilder.insertComment({'data': source_node.v.text.text.decode('utf-8')})
elif source_node.type in (
gumboc.NodeType.TEXT,
gumboc.NodeType.WHITESPACE,
gumboc.NodeType.CDATA):
treebuilder.insertText(source_node.v.text.text.decode('utf-8'))
else:
treebuilder.insertElementNormal(_convert_element(source_node))
for child_node in source_node.v.element.children:
_insert_node(treebuilder, child_node)
treebuilder.openElements.pop()
class HTMLParser(object):
def __init__(self, tree):
self.tree = tree
def parse(self, text_or_file, **kwargs):
try:
text = text_or_file.read()
except AttributeError:
# Assume a string.
text = text_or_file
with gumboc.parse(text, **kwargs) as output:
_convert_doctype(self.tree, output.contents.document.contents)
for node in output.contents.document.contents.children:
if node.type == gumboc.NodeType.COMMENT:
self.tree.insertComment({'data': node.v.text.text.decode('utf-8')},
self.tree.document)
elif node.type in (gumboc.NodeType.ELEMENT, gumboc.NodeType.TEMPLATE):
_insert_root(self.tree, output.contents.root.contents)
else:
          raise AssertionError(
              'Only comments and <html> nodes allowed at the root')
return self.tree.getDocument()
def parseFragment(self, text_or_file, container, **kwargs):
try:
text = text_or_file.read()
except AttributeError:
# Assume a string.
text = text_or_file
if ' ' in container:
container_ns, container = container.split(' ')
else:
container_ns = "html"
with gumboc.parse(
text,
fragment_context=gumboc.Tag.from_str(container),
fragment_namespace=getattr(gumboc.Namespace, container_ns.upper()),
**kwargs) as output:
for node in output.contents.document.contents.children:
if node.type in (gumboc.NodeType.ELEMENT, gumboc.NodeType.TEMPLATE):
_insert_root(self.tree, output.contents.root.contents, False)
else:
          raise AssertionError('Malformed fragment parse (??)')
return self.tree.getFragment()
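# Illustrative usage sketch (an addition for clarity, not part of the
# original adapter): assuming html5lib and the gumbo C library are
# available, HTMLParser above can drive any html5lib tree builder,
# mirroring the parse() example in the module docstring.
if __name__ == '__main__':
    import html5lib
    builder_class = html5lib.getTreeBuilder('dom')
    parser = HTMLParser(builder_class(namespaceHTMLElements=True))
    document = parser.parse('<h1>hello</h1>')
    print(document.toxml())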
| apache-2.0 |
liqin75/vse-vpnaas-plugin | vpn-resttest/vpn.py | 1 | 10329 | #!/usr/bin/python
import json
import sys
import os
import lib
vpnService = lib.qService + '/vpn'
tenantId="202e69af6fc14e449744140034f977e5"
edgeUri = "10.117.35.38"
edgeId = 'edge-1'
edgeUser = 'admin'
edgePasswd = 'default'
def createSite(tenant_id, subnet_id, name, description, local_endpoint,
local_id, peer_endpoint, peer_id, pri_networks, psk, mtu, location=None):
request = {
"site": {
"tenant_id": tenant_id,
"subnet_id": subnet_id,
"name": name,
"description": description,
"local_endpoint": local_endpoint,
"peer_endpoint": peer_endpoint,
"local_id": local_id,
"peer_id": peer_id,
"pri_networks": pri_networks,
"psk": psk,
"mtu": mtu
}
}
if location:
request['site']['location'] = location
return lib.doRequest(vpnService + '/sites', 'POST', request)['site']
def statsSite(id):
return lib.doRequest(vpnService + '/sites/{0}/stats'.format(id), 'GET')
def updateSite(id, site):
return lib.doRequest(vpnService + '/sites/{}'.format(id), 'PUT', site)['site']
def getSite(id):
return lib.doRequest(vpnService + '/sites/{}'.format(id), 'GET')['site']
def getSites():
return lib.doRequest(vpnService + '/sites', 'GET')['sites']
def deleteSite(id):
return lib.doRequest(vpnService + '/sites/{}'.format(id), 'DELETE')
#######################################################################################################
## IPSec Policy
def createIPSecPolicy(tenant_id, name='ipsec_policy1',
enc_alg='aes256', auth_alg='sha1',
dh_group='2', life_time=3600,description=None):
request = {
'ipsec_policy': {
'tenant_id': tenant_id,
'name': name,
'enc_alg': enc_alg,
'auth_alg': auth_alg,
'dh_group': dh_group,
'description': description,
'life_time': life_time}
}
return lib.doRequest(vpnService + '/ipsec_policys', 'POST', request)['ipsec_policy']
def updateIPSecPolicy(id, ipsec_policy):
return lib.doRequest(vpnService + '/ipsec_policys/{}'.format(id), 'PUT', ipsec_policy)['ipsec_policy']
def getIPSecPolicy(id):
return lib.doRequest(vpnService + '/ipsec_policys/{}'.format(id), 'GET')['ipsec_policy']
def getIPSecPolicys():
return lib.doRequest(vpnService + '/ipsec_policys', 'GET')['ipsec_policys']
def deleteIPSecPolicy(id):
return lib.doRequest(vpnService + '/ipsec_policys/{}'.format(id), 'DELETE')
#######################################################################################################
## Isakmp Policy
def createIsakmpPolicy(tenant_id, name='isakmp_policy1',
auth_mode='psk', enable_pfs=True,
enc_alg='aes256', auth_alg='sha1',
dh_group='2', life_time=28000,description=None):
request = {
'isakmp_policy': {
'tenant_id': tenant_id,
'name': name,
'auth_mode': auth_mode,
'enable_pfs': enable_pfs,
'enc_alg': enc_alg,
'auth_alg': auth_alg,
'dh_group': dh_group,
'description': description,
'life_time': life_time}
}
return lib.doRequest(vpnService + '/isakmp_policys', 'POST', request)['isakmp_policy']
def updateIsakmpPolicy(id, isakmp_policy):
return lib.doRequest(vpnService +
'/isakmp_policys/{}'.format(id), 'PUT', isakmp_policy)['isakmp_policy']
def getIsakmpPolicy(id):
return lib.doRequest(vpnService + '/isakmp_policys/{}'.format(id), 'GET')['isakmp_policy']
def getIsakmpPolicys():
return lib.doRequest(vpnService + '/isakmp_policys', 'GET')['isakmp_policys']
def deleteIsakmpPolicy(id):
return lib.doRequest(vpnService + '/isakmp_policys/{}'.format(id), 'DELETE')
############################################################################################
# Start of the scripts
#test networks creation operation
lib.NetworkInit()
#clean the sites first (also test get_site and delete_site)
sites = getSites()
if len(sites) >= 1:
for site in sites:
deleteSite(site['id'])
#test create_site
print '\n\nEDGE STATS:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/statistics")
print '\n\nEDGE CONFIG:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/config")
site = createSite(
tenant_id = tenantId,
subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14",
name = 'site1',
description = '',
local_endpoint = "10.117.35.202",
peer_endpoint = "10.117.35.203",
local_id = "192.168.2.11",
peer_id = "10.117.35.203",
psk = '123',
mtu = 1500,
pri_networks = [
{
'local_subnets': "192.168.1.0/24",
'peer_subnets': "192.168.11.0/24"
}])
#test get_sites
sites = getSites()
if len(sites) < 1:
print 'failed to create the site'
else:
print '\n\nSITES:'
print json.dumps(sites, indent=4)
#test get_site
site = getSite(sites[0]['id'])
print '\n\nSITE STATS:'
print json.dumps(statsSite(sites[0]['id']), indent=4)
print '\n\nCREATE EDGE STATS:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/statistics")
print '\n\nCREATE EDGE CONFIG:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/config")
##test update_site
update_site = {
"site" : {
"name": "new_site",
"description": "this is the updated site",
"local_endpoint": "10.117.35.202",
"peer_endpoint": "10.117.35.204",
"local_id": "192.168.2.11",
"peer_id": "10.117.35.204",
"psk": "new hello123",
"mtu": 1800,
"pri_networks" : [
{
'local_subnets': "192.168.2.0/24",
'peer_subnets': "192.168.13.0/24"
}],
}
}
site = updateSite(sites[0]['id'], update_site)
print '\n\nUPDATED SITES:'
sites = getSites()
print json.dumps(sites, indent=4)
print '\n\nUPDATE SITE STATS:'
print json.dumps(statsSite(sites[0]['id']), indent=4)
print '\n\nUPDATE EDGE STATS:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/statistics")
print '\n\nUPDATE EDGE CONFIG:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/config")
#test delete_site
deleteSite(sites[0]['id'])
sites = getSites()
if len(sites) >= 1:
print 'failed to delete the site'
print json.dumps(sites,indent=4)
print '\n\nDELETE EDGE STATS:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/statistics")
print '\n\nDELETE EDGE CONFIG:'
os.system("./restjson4.sh " + "GET " + edgeUri + " " + "/" +
edgeId + "/ipsec/config")
#############################################################################
## Start IPSec Policy test
#clean the ipsec_policys first (also test get_ipsec_policy and delete_ipsec_policy)
ipsec_policys = getIPSecPolicys()
if len(ipsec_policys) >= 1:
for ipsec_policy in ipsec_policys:
deleteIPSecPolicy(ipsec_policy['id'])
#test create_ipsec_policy
ipsec_policy = createIPSecPolicy(
tenant_id = tenantId,
name = 'ipsec_policy1',
description = '',
enc_alg = 'aes256',
auth_alg = 'sha1',
dh_group = '2',
life_time = 3600,
)
#test get_ipsec_policy
print '\n\nCREATE IPSec Policy STATS:'
print ipsec_policy
print json.dumps(ipsec_policy, indent=4)
#test update_ipsec_policy
update_ipsec_policy = {
"ipsec_policy" : {
"name": "new_ipsec_policy",
"description": "this is the updated ipsec_policy",
"dh_group": '5',
}
}
new_ipsec_policy = updateIPSecPolicy(ipsec_policy['id'], update_ipsec_policy)
print '\n\nUPDATED IPSec Policy:'
print json.dumps(new_ipsec_policy, indent=4)
#test delete_ipsec_policy
deleteIPSecPolicy(new_ipsec_policy['id'])
ipsec_policys = getIPSecPolicys()
if len(ipsec_policys) >= 1:
print 'failed to delete the ipsec_policy'
print json.dumps(ipsec_policys,indent=4)
######################################################################################
## Start Isakmp Policy
#clean the isakmp_policys first (also test get_isakmp_policy and delete_isakmp_policy)
isakmp_policys = getIsakmpPolicys()
if len(isakmp_policys) >= 1:
for isakmp_policy in isakmp_policys:
        deleteIsakmpPolicy(isakmp_policy['id'])
#test create_isakmp_policy
isakmp_policy = createIsakmpPolicy(
tenant_id = tenantId,
name = 'isakmp_policy1',
description = '',
enc_alg = 'aes256',
auth_alg = 'sha1',
dh_group = '2',
life_time = 3600,
)
#test get_isakmp_policy
print '\n\nCREATE Isakmp Policy STATS:'
print isakmp_policy
print json.dumps(isakmp_policy, indent=4)
#test update_isakmp_policy
update_isakmp_policy = {
"isakmp_policy" : {
"name": "new_isakmp_policy",
"description": "this is the updated isakmp_policy",
"dh_group": '5',
}
}
new_isakmp_policy = updateIsakmpPolicy(isakmp_policy['id'], update_isakmp_policy)
print '\n\nUPDATED Isakmp Policy:'
print json.dumps(new_isakmp_policy, indent=4)
#test delete_isakmp_policy
deleteIsakmpPolicy(new_isakmp_policy['id'])
isakmp_policys = getIsakmpPolicys()
if len(isakmp_policys) >= 1:
print 'failed to delete the isakmp_policy'
print json.dumps(isakmp_policys,indent=4)
| apache-2.0 |
davidcusatis/ursula | library/neutron_router_interface.py | 6 | 8536 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
from neutronclient.neutron import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='neutronclient and keystone client are required'")
DOCUMENTATION = '''
---
module: neutron_router_interface
short_description: Attach/Detach a subnet's interface to a router
description:
    - Attach/Detach a subnet interface to a router, to provide a gateway for the subnet.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35358/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the subnet's interface should be attached.
required: true
default: None
subnet_name:
description:
      - Name of the subnet whose interface should be attached to the router.
required: true
default: None
tenant_name:
description:
- Name of the tenant whose subnet has to be attached.
required: false
default: None
examples:
- code: "neutron_router_interface: state=present login_username=admin login_password=admin login_tenant_name=admin
tenant_name=tenant1 router_name=external_route subnet_name=t1subnet"
description: "Attach tenant1's subnet to the external router"
requirements: ["neutronclient", "keystoneclient"]
'''
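# Illustrative sketch (an addition for clarity, not part of the original
# module): a hypothetical playbook task using the options documented above.
#
#   - name: attach tenant1's subnet to the external router
#     neutron_router_interface:
#       state: present
#       login_username: admin
#       login_password: admin
#       login_tenant_name: admin
#       tenant_name: tenant1
#       router_name: external_route
#       subnet_name: t1subnet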
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'),
cacert=kwargs.get('cacert'))
except Exception as e:
        module.fail_json(msg="Error authenticating to keystone: %s" % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
        module.fail_json(msg="Error getting endpoint for neutron: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
else:
login_tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
        module.fail_json(msg="The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception as e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_subnet_id(module, neutron):
subnet_id = None
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['subnet_name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception as e:
        module.fail_json(msg="Error in getting the subnet list: %s" % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _get_port_id(neutron, module, router_id, subnet_id):
kwargs = {
'tenant_id': _os_tenant_id,
'device_id': router_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception as e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
for port in ports['ports']:
for subnet in port['fixed_ips']:
if subnet['subnet_id'] == subnet_id:
return port['id']
return None
def _add_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.add_interface_router(router_id, kwargs)
except Exception as e:
module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True
def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.remove_interface_router(router_id, kwargs)
except Exception as e:
module.fail_json(msg="Error in removing interface from router: %s" % e.message)
return True
def main():
module = AnsibleModule(
argument_spec = dict(
login_username = dict(default='admin'),
login_password = dict(required=True),
login_tenant_name = dict(required='True'),
auth_url = dict(default='http://127.0.0.1:35358/v2.0/'),
cacert = dict(default=None),
region_name = dict(default=None),
router_name = dict(required=True),
subnet_name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
),
)
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.fail_json(msg="failed to get the subnet id, please check the subnet name")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
_add_interface_router(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="created", id=port_id)
module.exit_json(changed=False, result="success", id=port_id)
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
            module.exit_json(changed=False, result="Success")
_remove_interface_router(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| mit |
ageron/tensorflow | tensorflow/python/profiler/pprof_profiler.py | 109 | 15280 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler for TensorFlow models that outputs data in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof
profile format.
The following needs to be set for profiler to work:
* trace_level needs to be set to FULL_TRACE
* run_metadata object should be passed in to session.run call
Sample usage:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.Session as sess:
...
sess.run(computation, run_metadata=run_metadata, options=options)
pprof_profiler.profile(sess.graph, run_metadata, output_dir)
The code above would output a pprof profile to separate output_dir/.*.pb.gz
file for each device. These files can be passed to pprof for formatting.
For e.g.:
pprof -png --nodecount=100 --sample_index=1 output_dir/profile_output.pb.gz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import namedtuple
import gzip
import os
import string
import sys
import time
from proto import profile_pb2
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
ProfileDatum = namedtuple('ProfileDatum', [
'node_exec_stats', 'op_type', 'traceback'])
class StringTable(object):
"""Keeps track of strings to add to string_table in pprof proto."""
def __init__(self):
# Pprof requires first entry in string_table to be ''.
self._string_table = ['']
self._string_to_index = {'': 0}
def index_of(self, value_str):
"""Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
"""
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
def next_index(self):
"""Gets index that would be assigned to the next added string.
Returns:
Index of the next string if it was added.
"""
return len(self._string_table)
def string_table(self):
"""Returns a list of strings to store in pprof's string_table."""
return self._string_table
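# Illustrative sketch (an addition for clarity, not part of the original
# module): the index_of() contract described above -- '' is always index 0,
# and repeated lookups return a stable index:
#     table = StringTable()
#     assert table.index_of('cpu') == 1   # appended on first lookup
#     assert table.index_of('cpu') == 1   # unchanged on repeat
#     assert table.string_table() == ['', 'cpu']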
class Functions(object):
"""Keeps track of `Function` protos for pprof profile."""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# Maps tuples in the form (file_path, function_name, start_line_number)
# to `Function` protos.
self._function_key_to_function = {}
def index_of(self, file_path, function_name, function_start_line):
"""Returns index of the function, adding the function if needed.
Args:
file_path: (string) Path to file where the function is defined.
function_name: (string) Function name.
function_start_line: (integer) Start line number of function definition.
Returns:
Function index.
"""
function_key = (file_path, function_name, function_start_line)
if function_key in self._function_key_to_function:
return self._function_key_to_function[function_key].id
else:
# Function indexes should start from 1
function_index = len(self._function_key_to_function) + 1
function = profile_pb2.Function()
function.id = function_index
function.name = self._string_table.index_of(function_name)
function.filename = self._string_table.index_of(file_path)
function.start_line = function_start_line
self._function_key_to_function[function_key] = function
return function_index
def function_protos(self):
"""Returns list of `profile_pb2.Function` protos."""
return self._function_key_to_function.values()
class Locations(object):
"""Keeps track of `Location` protos for pprof profile.
`Locations` store information about function call locations.
"""
def __init__(self, functions):
"""Constructor.
Args:
functions: A `Functions` object.
"""
self._functions = functions
# Maps tuples in the form (file_path, called_function_name, line_number)
# to `Location` protos.
self._location_key_to_location = {}
def index_of(
self, file_path, line_number, called_function_name, called_file_path,
called_function_start_line):
"""Returns index of the location, adding the location if needed.
Args:
file_path: (string) Path to file that makes the call.
line_number: (integer) Call line number.
called_function_name: (string) Function name of the function called at
`file_path` and `line_number`.
called_file_path: (string) Path to file where the called function is
defined.
called_function_start_line: (integer) Start line number of called
function definition in `called_file_path` file.
Returns:
Index of location.
"""
location_key = (file_path, called_function_name, line_number)
if location_key in self._location_key_to_location:
location = self._location_key_to_location[location_key]
return location.id
else:
# Location indexes should start from 1
location_index = len(self._location_key_to_location) + 1
location = profile_pb2.Location()
location.id = location_index
self._location_key_to_location[location_key] = location
line = location.line.add()
line.function_id = self._functions.index_of(
called_file_path, called_function_name, called_function_start_line)
line.line = line_number
return location_index
def location_protos(self):
"""Returns list of `profile_pb2.Location` protos."""
return self._location_key_to_location.values()
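# Illustrative sketch (an addition for clarity, not part of the original
# module): a Location ties a call site (file, line) to the Function entry
# for the callee; ids start at 1, mirroring Functions:
#     strings = StringTable()
#     locations = Locations(Functions(strings))
#     loc_id = locations.index_of('caller.py', 10, 'callee', 'callee.py', 3)
#     assert loc_id == 1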
class Samples(object):
"""Keeps track of `Sample` protos for pprof profile.
Samples store the following statistics in order:
count, all_time, op_time
"""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# TODO(annarev): figure out if location is unique for each node name.
# If not, also key this dictionary based on location ids.
self._node_name_to_sample = {}
def add(self, datum, location_ids):
"""Adds a sample data point.
Args:
datum: `ProfileDatum` to add a sample for.
      location_ids: List of numeric location ids for this sample.
"""
node_name = datum.node_exec_stats.node_name
if node_name in self._node_name_to_sample:
sample = self._node_name_to_sample[node_name]
sample.location_id.extend(location_ids)
else:
sample = profile_pb2.Sample()
# Sample stores 3 values: count, all_time, op_time
sample.value.extend([0, 0, 0])
label = sample.label.add()
label.key = self._string_table.index_of('node_name')
label.str = self._string_table.index_of(node_name)
label = sample.label.add()
label.key = self._string_table.index_of('op_type')
label.str = self._string_table.index_of(datum.op_type)
self._node_name_to_sample[node_name] = sample
sample.value[0] += 1
sample.value[1] += datum.node_exec_stats.all_end_rel_micros
sample.value[2] += (
datum.node_exec_stats.op_end_rel_micros -
datum.node_exec_stats.op_start_rel_micros)
def get_sample_protos(self):
"""Returns list of `Sample` protos for pprof profile."""
return self._node_name_to_sample.values()
class PprofProfiler(object):
"""Creates profiles in pprof format."""
def __init__(self, graph, run_metadata):
"""Constructor.
Args:
graph: A `Graph` instance.
run_metadata: A list of `RunMetadata` objects.
"""
self._graph = graph
self._run_metadata = run_metadata
self._string_table = StringTable()
self._functions = Functions(self._string_table)
self._locations = Locations(self._functions)
def profile(self):
"""Generates pprof profiles.
Returns:
Dictionary mapping from device name to proto in `profile_pb2.Profile`
format.
"""
profiles = {}
data_generator_func = self._get_profile_data_generator()
for device_index, device_stats in enumerate(
self._run_metadata.step_stats.dev_stats):
# Create profile
pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))
if not pprof_proto.sample:
print(
'Not enough data to create profile for device %s. Did you pass '
'RunMetadata to session.run call?' % device_stats.device)
continue
# Add device name comment
device_count = len(self._run_metadata.step_stats.dev_stats)
device_description = (
'Device %d of %d: %s' %
(device_index + 1, device_count, device_stats.device))
device_description_str_index = self._string_table.next_index()
pprof_proto.string_table.append(device_description)
pprof_proto.comment.append(device_description_str_index)
profiles[device_stats.device] = pprof_proto
return profiles
def _get_pprof_proto(self, profile_datum_generator):
"""Returns profile data in pprof proto format.
Args:
profile_datum_generator: Generator outputting `ProfileDatum` objects.
Returns:
A proto in pprof format.
"""
pprof_profile = profile_pb2.Profile()
samples = Samples(self._string_table)
for datum in profile_datum_generator:
if not datum.traceback:
continue
stack_frame = datum.traceback[-1]
after_apply_op = False
location_ids = []
# We add locations from stack trace in bottom-up order.
for stack_frame_index in reversed(range(len(datum.traceback) - 1)):
prev_stack_frame = stack_frame
stack_frame = datum.traceback[stack_frame_index]
# Call at current frame calls function at previous frame.
prev_file_path = prev_stack_frame[0]
prev_function = prev_stack_frame[2]
prev_function_start_line = prev_stack_frame[4]
curr_file_path = stack_frame[0]
curr_line_number = stack_frame[1]
# Skip all calls up to apply_op since they are the same for all ops.
if not after_apply_op:
if prev_function == 'apply_op':
after_apply_op = True
continue
location_index = self._locations.index_of(
curr_file_path, curr_line_number,
prev_function, prev_file_path, prev_function_start_line)
location_ids.append(location_index)
samples.add(datum, location_ids)
sample_type_description = 'count'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('count')
sample_type_description = 'all_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
sample_type_description = 'op_time'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('nanoseconds')
pprof_profile.string_table.extend(self._string_table.string_table())
pprof_profile.sample.extend(samples.get_sample_protos())
pprof_profile.function.extend(self._functions.function_protos())
pprof_profile.location.extend(self._locations.location_protos())
return pprof_profile
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_traceback = defaultdict(list)
node_to_op_type = defaultdict(str)
for op in self._graph.get_operations():
node_to_traceback[op.name] = op.traceback_with_start_lines
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':
continue
yield ProfileDatum(
node_stats,
node_to_op_type[node_stats.node_name],
node_to_traceback[node_stats.node_name])
return profile_data_generator
def get_profiles(graph, run_metadata):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
Returns:
A dictionary mapping from device name to pprof proto for that device.
"""
return PprofProfiler(graph, run_metadata).profile()
def profile(graph, run_metadata, output_dir=None):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
output_dir: (string) Directory to output pprof profile to.
Profile files for each device will be stored in compressed
serialized proto format. If output_dir is None, profile protos
will be printed to stdout instead.
Returns:
List of output files created by this profile call.
(Note: this list will be empty if output_dir is None)
"""
profiles = get_profiles(graph, run_metadata)
output_file_template = None
if output_dir:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
time_suffix = time.strftime('%Y%m%d%H%M%S')
output_file_template = os.path.join(
output_dir, '%s_' + time_suffix + '.pb.gz')
profile_files = []
for device, pprof_proto in profiles.items():
if output_file_template is None:
print('No output directory specified, printing to stdout instead.')
print(pprof_proto)
else:
device_name = str(device).strip('/').translate(
maketrans('/:', '__'))
profile_file = output_file_template % device_name
profile_files.append(profile_file)
with gzip.open(profile_file, 'w') as output_file:
print('Writing profile to %s...' % profile_file)
output_file.write(pprof_proto.SerializeToString())
return profile_files
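# Illustrative sketch (an addition for clarity, not part of the original
# module): get_profiles() returns one pprof proto per device, so callers can
# inspect or serialize selectively; `graph` and `run_metadata` are assumed
# to come from a session.run() traced with FULL_TRACE as described in the
# module docstring:
#     for device, proto in get_profiles(graph, run_metadata).items():
#         print(device, len(proto.sample), 'samples')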
| apache-2.0 |
maciek263/django2 | myvenv/Lib/site-packages/gunicorn/sock.py | 26 | 6999 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import socket
import stat
import sys
import time
from gunicorn import util
from gunicorn.six import string_types
SD_LISTEN_FDS_START = 3
class BaseSocket(object):
def __init__(self, address, conf, log, fd=None):
self.log = log
self.conf = conf
self.cfg_addr = address
if fd is None:
sock = socket.socket(self.FAMILY, socket.SOCK_STREAM)
else:
sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM)
self.sock = self.set_options(sock, bound=(fd is not None))
    def __str__(self):
return "<socket %d>" % self.sock.fileno()
def __getattr__(self, name):
return getattr(self.sock, name)
def set_options(self, sock, bound=False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not bound:
self.bind(sock)
sock.setblocking(0)
# make sure that the socket can be inherited
if hasattr(sock, "set_inheritable"):
sock.set_inheritable(True)
sock.listen(self.conf.backlog)
return sock
def bind(self, sock):
sock.bind(self.cfg_addr)
def close(self):
try:
self.sock.close()
except socket.error as e:
self.log.info("Error while closing socket %s", str(e))
del self.sock
class TCPSocket(BaseSocket):
FAMILY = socket.AF_INET
def __str__(self):
if self.conf.is_ssl:
scheme = "https"
else:
scheme = "http"
addr = self.sock.getsockname()
return "%s://%s:%d" % (scheme, addr[0], addr[1])
def set_options(self, sock, bound=False):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return super(TCPSocket, self).set_options(sock, bound=bound)
class TCP6Socket(TCPSocket):
FAMILY = socket.AF_INET6
def __str__(self):
(host, port, fl, sc) = self.sock.getsockname()
return "http://[%s]:%d" % (host, port)
class UnixSocket(BaseSocket):
FAMILY = socket.AF_UNIX
def __init__(self, addr, conf, log, fd=None):
if fd is None:
try:
st = os.stat(addr)
except OSError as e:
if e.args[0] != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(addr)
else:
raise ValueError("%r is not a socket" % addr)
self.parent = os.getpid()
super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
def __str__(self):
return "unix:%s" % self.cfg_addr
def bind(self, sock):
old_umask = os.umask(self.conf.umask)
sock.bind(self.cfg_addr)
util.chown(self.cfg_addr, self.conf.uid, self.conf.gid)
os.umask(old_umask)
def close(self):
super(UnixSocket, self).close()
if self.parent == os.getpid():
os.unlink(self.cfg_addr)
def _sock_type(addr):
if isinstance(addr, tuple):
if util.is_ipv6(addr[0]):
sock_type = TCP6Socket
else:
sock_type = TCPSocket
elif isinstance(addr, string_types):
sock_type = UnixSocket
else:
raise TypeError("Unable to create socket from: %r" % addr)
return sock_type
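# A quick sanity check of the mapping above (hypothetical addresses):
#
#   _sock_type(('127.0.0.1', 8000))   # -> TCPSocket
#   _sock_type(('::1', 8000))         # -> TCP6Socket
#   _sock_type('/tmp/gunicorn.sock')  # -> UnixSocket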
def create_sockets(conf, log):
"""
Create a new socket for the given address. If the
address is a tuple, a TCP socket is created. If it
is a string, a Unix socket is created. Otherwise
a TypeError is raised.
"""
# Systemd support, use the sockets managed by systemd and passed to
# gunicorn.
# http://www.freedesktop.org/software/systemd/man/systemd.socket.html
listeners = []
if ('LISTEN_PID' in os.environ
and int(os.environ.get('LISTEN_PID')) == os.getpid()):
for i in range(int(os.environ.get('LISTEN_FDS', 0))):
fd = i + SD_LISTEN_FDS_START
try:
sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
sockname = sock.getsockname()
if isinstance(sockname, str) and sockname.startswith('/'):
listeners.append(UnixSocket(sockname, conf, log, fd=fd))
elif len(sockname) == 2 and '.' in sockname[0]:
listeners.append(TCPSocket("%s:%s" % sockname, conf, log,
fd=fd))
elif len(sockname) == 4 and ':' in sockname[0]:
listeners.append(TCP6Socket("[%s]:%s" % sockname[:2], conf,
log, fd=fd))
except socket.error:
pass
del os.environ['LISTEN_PID'], os.environ['LISTEN_FDS']
if listeners:
log.debug('Socket activation sockets: %s',
",".join([str(l) for l in listeners]))
return listeners
# get it only once
laddr = conf.address
# check ssl config early to raise the error on startup
    # only the certfile is needed since it can contain the keyfile
if conf.certfile and not os.path.exists(conf.certfile):
raise ValueError('certfile "%s" does not exist' % conf.certfile)
if conf.keyfile and not os.path.exists(conf.keyfile):
raise ValueError('keyfile "%s" does not exist' % conf.keyfile)
# sockets are already bound
if 'GUNICORN_FD' in os.environ:
fds = os.environ.pop('GUNICORN_FD').split(',')
for i, fd in enumerate(fds):
fd = int(fd)
addr = laddr[i]
sock_type = _sock_type(addr)
try:
listeners.append(sock_type(addr, conf, log, fd=fd))
except socket.error as e:
if e.args[0] == errno.ENOTCONN:
log.error("GUNICORN_FD should refer to an open socket.")
else:
raise
return listeners
    # no sockets are bound; this is the first initialization of gunicorn
    # in this environment.
for addr in laddr:
sock_type = _sock_type(addr)
        # If we fail to create the socket (e.g. the address is still in
        # use), retry a few times before giving up.
sock = None
for i in range(5):
try:
sock = sock_type(addr, conf, log)
except socket.error as e:
if e.args[0] == errno.EADDRINUSE:
log.error("Connection in use: %s", str(addr))
if e.args[0] == errno.EADDRNOTAVAIL:
log.error("Invalid address: %s", str(addr))
                if i < 4:
log.error("Retrying in 1 second.")
time.sleep(1)
else:
break
if sock is None:
log.error("Can't connect to %s", str(addr))
sys.exit(1)
listeners.append(sock)
return listeners
| mit |
kalrey/swift | swift/common/middleware/tempauth.py | 3 | 29561 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from time import time
from traceback import format_exc
from urllib import unquote
from uuid import uuid4
from hashlib import sha1
import hmac
import base64
from eventlet import Timeout
from swift.common.swob import Response, Request
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
HTTPUnauthorized
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.middleware.acl import (
clean_acl, parse_acl, referrer_allowed, acls_from_account_info)
from swift.common.utils import cache_from_env, get_logger, \
split_path, config_true_value, register_swift_info
from swift.proxy.controllers.base import get_account_info
class TempAuth(object):
"""
Test authentication and authorization system.
Add to your pipeline in proxy-server.conf, such as::
[pipeline:main]
pipeline = catch_errors cache tempauth proxy-server
Set account auto creation to true in proxy-server.conf::
[app:proxy-server]
account_autocreate = true
And add a tempauth filter section, such as::
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
# To allow accounts/users with underscores you can base64 encode them.
# Here is the account "under_score" and username "a_b" (note the lack
# of padding equal signs):
user64_dW5kZXJfc2NvcmU_YV9i = testing4
See the proxy-server.conf-sample for more information.
Account ACLs:
If a swift_owner issues a POST or PUT to the account, with the
X-Account-Access-Control header set in the request, then this may
allow certain types of access for additional users.
* Read-Only: Users with read-only access can list containers in the
account, list objects in any container, retrieve objects, and view
unprivileged account/container/object metadata.
* Read-Write: Users with read-write access can (in addition to the
read-only privileges) create objects, overwrite existing objects,
create new containers, and set unprivileged container/object
metadata.
* Admin: Users with admin access are swift_owners and can perform
any action, including viewing/setting privileged metadata (e.g.
changing account ACLs).
To generate headers for setting an account ACL::
from swift.common.middleware.acl import format_acl
acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] }
header_value = format_acl(version=2, acl_dict=acl_data)
To generate a curl command line from the above::
token=...
storage_url=...
python -c '
from swift.common.middleware.acl import format_acl
acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] }
headers = {'X-Account-Access-Control':
format_acl(version=2, acl_dict=acl_data)}
header_str = ' '.join(["-H '%s: %s'" % (k, v)
for k, v in headers.items()])
print ('curl -D- -X POST -H "x-auth-token: $token" %s '
'$storage_url' % header_str)
'
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values from the Paste config file
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = get_logger(conf, log_route='tempauth')
self.log_headers = config_true_value(conf.get('log_headers', 'f'))
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
self.logger.set_statsd_prefix('tempauth.%s' % (
self.reseller_prefix if self.reseller_prefix else 'NONE',))
self.auth_prefix = conf.get('auth_prefix', '/auth/')
if not self.auth_prefix or not self.auth_prefix.strip('/'):
self.logger.warning('Rewriting invalid auth prefix "%s" to '
'"/auth/" (Non-empty auth prefix path '
'is required)' % self.auth_prefix)
self.auth_prefix = '/auth/'
if self.auth_prefix[0] != '/':
self.auth_prefix = '/' + self.auth_prefix
if self.auth_prefix[-1] != '/':
self.auth_prefix += '/'
self.token_life = int(conf.get('token_life', 86400))
self.allow_overrides = config_true_value(
conf.get('allow_overrides', 't'))
self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
self.users = {}
for conf_key in conf:
if conf_key.startswith('user_') or conf_key.startswith('user64_'):
account, username = conf_key.split('_', 1)[1].split('_')
if conf_key.startswith('user64_'):
# Because trailing equal signs would screw up config file
# parsing, we auto-pad with '=' chars.
                    account += '=' * (-len(account) % 4)
                    account = base64.b64decode(account)
                    username += '=' * (-len(username) % 4)
                    username = base64.b64decode(username)
values = conf[conf_key].split()
if not values:
raise ValueError('%s has no key set' % conf_key)
key = values.pop(0)
if values and ('://' in values[-1] or '$HOST' in values[-1]):
url = values.pop()
else:
url = '$HOST/v1/%s%s' % (self.reseller_prefix, account)
self.users[account + ':' + username] = {
'key': key, 'url': url, 'groups': values}
def __call__(self, env, start_response):
"""
Accepts a standard WSGI application call, authenticating the request
and installing callback hooks for authorization and ACL header
validation. For an authenticated request, REMOTE_USER will be set to a
comma separated list of the user's groups.
With a non-empty reseller prefix, acts as the definitive auth service
for just tokens and accounts that begin with that prefix, but will deny
requests outside this prefix if no other auth middleware overrides it.
With an empty reseller prefix, acts as the definitive auth service only
for tokens that validate to a non-empty set of groups. For all other
requests, acts as the fallback auth service when no other auth
middleware overrides it.
Alternatively, if the request matches the self.auth_prefix, the request
will be routed through the internal auth request handler (self.handle).
This is to handle granting tokens, etc.
"""
if self.allow_overrides and env.get('swift.authorize_override', False):
return self.app(env, start_response)
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
return self.handle(env, start_response)
s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
groups = self.get_groups(env, token)
if groups:
user = groups and groups.split(',', 1)[0] or ''
trans_id = env.get('swift.trans_id')
self.logger.debug('User: %s uses token %s (trans_id %s)' %
(user, 's3' if s3 else token, trans_id))
env['REMOTE_USER'] = groups
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
if '.reseller_admin' in groups:
env['reseller_request'] = True
else:
# Unauthorized token
if self.reseller_prefix and not s3:
# Because I know I'm the definitive auth for this token, I
# can deny it outright.
self.logger.increment('unauthorized')
try:
vrs, realm, rest = split_path(env['PATH_INFO'],
2, 3, True)
except ValueError:
realm = 'unknown'
return HTTPUnauthorized(headers={
'Www-Authenticate': 'Swift realm="%s"' % realm})(
env, start_response)
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed tokens, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
else:
if self.reseller_prefix:
# With a non-empty reseller_prefix, I would like to be called
# back for anonymous access to accounts I know I'm the
# definitive auth for.
try:
version, rest = split_path(env.get('PATH_INFO', ''),
1, 2, True)
except ValueError:
version, rest = None, None
self.logger.increment('errors')
if rest and rest.startswith(self.reseller_prefix):
# Handle anonymous access to accounts I'm the definitive
# auth for.
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
# Not my token, not my account, I can't authorize this request,
# deny all is a good idea if not already set...
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed accounts, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
return self.app(env, start_response)
def _get_user_groups(self, account, account_user, account_id):
"""
:param account: example: test
:param account_user: example: test:tester
"""
groups = [account, account_user]
groups.extend(self.users[account_user]['groups'])
if '.admin' in groups:
groups.remove('.admin')
groups.append(account_id)
groups = ','.join(groups)
return groups
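    # For example (hypothetical config values): with account 'test',
    # account_user 'test:tester' whose configured groups are ['.admin'], and
    # account_id 'AUTH_test', this returns 'test,test:tester,AUTH_test'.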
def get_groups(self, env, token):
"""
Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
groups = None
memcache_client = cache_from_env(env)
if not memcache_client:
raise Exception('Memcache required')
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
if env.get('HTTP_AUTHORIZATION'):
account_user, sign = \
env['HTTP_AUTHORIZATION'].split(' ')[1].rsplit(':', 1)
if account_user not in self.users:
return None
account, user = account_user.split(':', 1)
account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace(account_user, account_id, 1)
msg = base64.urlsafe_b64decode(unquote(token))
key = self.users[account_user]['key']
s = base64.encodestring(hmac.new(key, msg, sha1).digest()).strip()
if s != sign:
return None
groups = self._get_user_groups(account, account_user, account_id)
return groups
def account_acls(self, req):
"""
Return a dict of ACL data from the account server via get_account_info.
Auth systems may define their own format, serialization, structure,
and capabilities implemented in the ACL headers and persisted in the
sysmeta data. However, auth systems are strongly encouraged to be
interoperable with Tempauth.
Account ACLs are set and retrieved via the header
X-Account-Access-Control
For header format and syntax, see:
* :func:`swift.common.middleware.acl.parse_acl()`
* :func:`swift.common.middleware.acl.format_acl()`
"""
info = get_account_info(req.environ, self.app, swift_source='TA')
try:
acls = acls_from_account_info(info)
except ValueError as e1:
self.logger.warn("Invalid ACL stored in metadata: %r" % e1)
return None
except NotImplementedError as e2:
self.logger.warn("ACL version exceeds middleware version: %r" % e2)
return None
return acls
def extract_acl_and_report_errors(self, req):
"""
Return a user-readable string indicating the errors in the input ACL,
or None if there are no errors.
"""
acl_header = 'x-account-access-control'
acl_data = req.headers.get(acl_header)
result = parse_acl(version=2, data=acl_data)
if result is None:
return 'Syntax error in input (%r)' % acl_data
tempauth_acl_keys = 'admin read-write read-only'.split()
for key in result:
# While it is possible to construct auth systems that collaborate
# on ACLs, TempAuth is not such an auth system. At this point,
# it thinks it is authoritative.
if key not in tempauth_acl_keys:
return 'Key %r not recognized' % key
for key in tempauth_acl_keys:
if key not in result:
continue
if not isinstance(result[key], list):
return 'Value for key %r must be a list' % key
for grantee in result[key]:
if not isinstance(grantee, str):
return 'Elements of %r list must be strings' % key
# Everything looks fine, no errors found
internal_hdr = get_sys_meta_prefix('account') + 'core-access-control'
req.headers[internal_hdr] = req.headers.pop(acl_header)
return None
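    # An ACL document that passes the checks above looks like (hypothetical
    # user names):
    #
    #   {"admin": ["alice"], "read-write": ["bob"], "read-only": ["carol"]}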
def authorize(self, req):
"""
Returns None if the request is authorized to continue or a standard
WSGI response callable if not.
"""
try:
_junk, account, container, obj = req.split_path(1, 4, True)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not account or not account.startswith(self.reseller_prefix):
self.logger.debug("Account name: %s doesn't start with "
"reseller_prefix: %s."
% (account, self.reseller_prefix))
return self.denied_response(req)
# At this point, TempAuth is convinced that it is authoritative.
# If you are sending an ACL header, it must be syntactically valid
# according to TempAuth's rules for ACL syntax.
acl_data = req.headers.get('x-account-access-control')
if acl_data is not None:
error = self.extract_acl_and_report_errors(req)
if error:
msg = 'X-Account-Access-Control invalid: %s\n\nInput: %s\n' % (
error, acl_data)
headers = [('Content-Type', 'text/plain; charset=UTF-8')]
return HTTPBadRequest(request=req, headers=headers, body=msg)
user_groups = (req.remote_user or '').split(',')
account_user = user_groups[1] if len(user_groups) > 1 else None
if '.reseller_admin' in user_groups and \
account != self.reseller_prefix and \
account[len(self.reseller_prefix)] != '.':
req.environ['swift_owner'] = True
self.logger.debug("User %s has reseller admin authorizing."
% account_user)
return None
if account in user_groups and \
(req.method not in ('DELETE', 'PUT') or container):
# If the user is admin for the account and is not trying to do an
# account DELETE or PUT...
req.environ['swift_owner'] = True
self.logger.debug("User %s has admin authorizing."
% account_user)
return None
if (req.environ.get('swift_sync_key')
and (req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None))
and 'x-timestamp' in req.headers):
self.logger.debug("Allow request with container sync-key: %s."
% req.environ['swift_sync_key'])
return None
if req.method == 'OPTIONS':
            # allow OPTIONS requests to proceed as normal
self.logger.debug("Allow OPTIONS request.")
return None
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in groups:
self.logger.debug("Allow authorizing %s via referer ACL."
% req.referer)
return None
for user_group in user_groups:
if user_group in groups:
self.logger.debug("User %s allowed in ACL: %s authorizing."
% (account_user, user_group))
return None
# Check for access via X-Account-Access-Control
acct_acls = self.account_acls(req)
if acct_acls:
# At least one account ACL is set in this account's sysmeta data,
# so we should see whether this user is authorized by the ACLs.
user_group_set = set(user_groups)
if user_group_set.intersection(acct_acls['admin']):
req.environ['swift_owner'] = True
self.logger.debug('User %s allowed by X-Account-Access-Control'
' (admin)' % account_user)
return None
if (user_group_set.intersection(acct_acls['read-write']) and
(container or req.method in ('GET', 'HEAD'))):
# The RW ACL allows all operations to containers/objects, but
# only GET/HEAD to accounts (and OPTIONS, above)
self.logger.debug('User %s allowed by X-Account-Access-Control'
' (read-write)' % account_user)
return None
if (user_group_set.intersection(acct_acls['read-only']) and
req.method in ('GET', 'HEAD')):
self.logger.debug('User %s allowed by X-Account-Access-Control'
' (read-only)' % account_user)
return None
return self.denied_response(req)
def denied_response(self, req):
"""
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
self.logger.increment('forbidden')
return HTTPForbidden(request=req)
else:
self.logger.increment('unauthorized')
return HTTPUnauthorized(request=req)
def handle(self, env, start_response):
"""
WSGI entry point for auth requests (ones that match the
self.auth_prefix).
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
req = Request(env)
if self.auth_prefix:
req.path_info_pop()
req.bytes_transferred = '-'
req.client_disconnect = False
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return self.handle_request(req)(env, start_response)
except (Exception, Timeout):
print("EXCEPTION IN handle: %s: %s" % (format_exc(), env))
self.logger.increment('errors')
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def handle_request(self, req):
"""
Entry point for auth requests (ones that match the self.auth_prefix).
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
req.start_time = time()
handler = None
try:
version, account, user, _junk = req.split_path(1, 4, True)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if version in ('v1', 'v1.0', 'auth'):
if req.method == 'GET':
handler = self.handle_get_token
if not handler:
self.logger.increment('errors')
req.response = HTTPBadRequest(request=req)
else:
req.response = handler(req)
return req.response
def handle_get_token(self, req):
"""
Handles the various `request for token and service end point(s)` calls.
There are various formats to support the various auth servers in the
past. Examples::
GET <auth-prefix>/v1/<act>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/v1.0
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
On successful authentication, the response will have X-Auth-Token and
X-Storage-Token set to the token to use with Swift and X-Storage-URL
set to the URL to the default Swift cluster to use.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with data set as explained
above.
"""
# Validate the request info
try:
pathsegs = split_path(req.path_info, 1, 3, True)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
account = pathsegs[1]
user = req.headers.get('x-storage-user')
if not user:
user = req.headers.get('x-auth-user')
if not user or ':' not in user:
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers=
{'Www-Authenticate':
'Swift realm="%s"' % account})
account2, user = user.split(':', 1)
if account != account2:
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers=
{'Www-Authenticate':
'Swift realm="%s"' % account})
key = req.headers.get('x-storage-pass')
if not key:
key = req.headers.get('x-auth-key')
elif pathsegs[0] in ('auth', 'v1.0'):
user = req.headers.get('x-auth-user')
if not user:
user = req.headers.get('x-storage-user')
if not user or ':' not in user:
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers=
{'Www-Authenticate':
'Swift realm="unknown"'})
account, user = user.split(':', 1)
key = req.headers.get('x-auth-key')
if not key:
key = req.headers.get('x-storage-pass')
else:
return HTTPBadRequest(request=req)
if not all((account, user, key)):
self.logger.increment('token_denied')
realm = account or 'unknown'
return HTTPUnauthorized(request=req, headers={'Www-Authenticate':
'Swift realm="%s"' %
realm})
# Authenticate user
account_user = account + ':' + user
if account_user not in self.users:
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers=
{'Www-Authenticate':
'Swift realm="%s"' % account})
if self.users[account_user]['key'] != key:
self.logger.increment('token_denied')
return HTTPUnauthorized(request=req, headers=
{'Www-Authenticate':
'Swift realm="unknown"'})
account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
# Get memcache client
memcache_client = cache_from_env(req.environ)
if not memcache_client:
raise Exception('Memcache required')
# See if a token already exists and hasn't expired
token = None
memcache_user_key = '%s/user/%s' % (self.reseller_prefix, account_user)
candidate_token = memcache_client.get(memcache_user_key)
if candidate_token:
memcache_token_key = \
'%s/token/%s' % (self.reseller_prefix, candidate_token)
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
expires, old_groups = cached_auth_data
old_groups = old_groups.split(',')
new_groups = self._get_user_groups(account, account_user,
account_id)
if expires > time() and \
set(old_groups) == set(new_groups.split(',')):
token = candidate_token
# Create a new token if one didn't exist
if not token:
# Generate new token
token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
expires = time() + self.token_life
groups = self._get_user_groups(account, account_user, account_id)
# Save token
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
memcache_client.set(memcache_token_key, (expires, groups),
time=float(expires - time()))
# Record the token with the user info for future use.
memcache_user_key = \
'%s/user/%s' % (self.reseller_prefix, account_user)
memcache_client.set(memcache_user_key, token,
time=float(expires - time()))
resp = Response(request=req, headers={
'x-auth-token': token, 'x-storage-token': token})
url = self.users[account_user]['url'].replace('$HOST', resp.host_url)
if self.storage_url_scheme != 'default':
url = self.storage_url_scheme + ':' + url.split(':', 1)[1]
resp.headers['x-storage-url'] = url
return resp
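    # A token request matching the formats documented above might look like
    # (hypothetical host, using the sample credentials from the class
    # docstring):
    #
    #   curl -D- -H 'X-Auth-User: test:tester' -H 'X-Auth-Key: testing' \
    #       http://127.0.0.1:8080/auth/v1.0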
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('tempauth', account_acls=True)
def auth_filter(app):
return TempAuth(app, conf)
return auth_filter
| apache-2.0 |
bdh1011/cupeye | venv/lib/python2.7/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
"""
Override the default bdist_rpm behavior to do the following:
1. Run egg_info to ensure the name and version are properly calculated.
2. Always run 'install' using --single-version-externally-managed to
disable eggs in RPM distributions.
3. Replace dash with underscore in the version numbers for better RPM
compatibility.
"""
def run(self):
# ensure distro name is up-to-date
self.run_command('egg_info')
orig.bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-', '_')
spec = orig.bdist_rpm._make_spec_file(self)
line23 = '%define version ' + version
line24 = '%define version ' + rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23, line24)
for line in spec
]
insert_loc = spec.index(line24) + 1
unmangled_version = "%define unmangled_version " + version
spec.insert(insert_loc, unmangled_version)
return spec
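    # For example (hypothetical version string): a distribution version of
    # '1.0-rc1' yields a spec file containing:
    #
    #   %define version 1.0_rc1
    #   %define unmangled_version 1.0-rc1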
| bsd-3-clause |
dturner-tw/pants | tests/python/pants_test/backend/python/test_pants_requirement.py | 17 | 1738 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.register import build_file_aliases
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.build_environment import pants_version
from pants_test.base_test import BaseTest
class PantsRequirementTest(BaseTest):
@property
def alias_groups(self):
# NB: We use aliases and BUILD files to test proper registration of the pants_requirement macro.
return build_file_aliases()
def assert_pants_requirement(self, python_requirement_library):
self.assertIsInstance(python_requirement_library, PythonRequirementLibrary)
pants_requirement = PythonRequirement('pantsbuild.pants=={}'.format(pants_version()))
self.assertEqual([pants_requirement.requirement],
list(pr.requirement for pr in python_requirement_library.payload.requirements))
def test_default_name(self):
self.add_to_build_file('3rdparty/python/pants', 'pants_requirement()')
python_requirement_library = self.target('3rdparty/python/pants')
self.assert_pants_requirement(python_requirement_library)
def test_custom_name(self):
self.add_to_build_file('3rdparty/python/pants', "pants_requirement('pantsbuild.pants')")
python_requirement_library = self.target('3rdparty/python/pants:pantsbuild.pants')
self.assert_pants_requirement(python_requirement_library)
| apache-2.0 |
windskyer/mvpn | mvpn/db/sqlalchemy/migrate_repo/versions/002_add_cui_table.py | 1 | 1480 | from sqlalchemy import *
from migrate import *
def _create_cui_table(migrate_engine, drop=False):
meta = MetaData()
meta.bind = migrate_engine
cui = Table('cui', meta,
Column('clientipaddress', VARCHAR(15), primary_key=True, nullable=False, server_default=''),
Column('callingstationid', VARCHAR(50), primary_key=True, nullable=False, server_default=''),
Column('username', VARCHAR(64), primary_key=True, nullable=False, server_default=''),
Column('cui', VARCHAR(32), nullable=False, server_default=''),
Column('creationdate', TIMESTAMP, nullable=False),
Column('lastaccounting', TIMESTAMP, nullable=False, server_default='0000-00-00 00:00:00'),
extend_existing=True,
mysql_engine='MyISAM',
mysql_charset='utf8'
)
tables = [cui]
for table in tables:
if not drop:
try:
table.create()
except Exception:
raise
else:
try:
table.drop()
except Exception:
raise
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
_create_cui_table(migrate_engine)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
_create_cui_table(migrate_engine, drop=True)
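# A minimal sketch of driving this migration by hand (hypothetical DSN;
# sqlalchemy-migrate normally calls upgrade()/downgrade() itself):
#
#   from sqlalchemy import create_engine
#   upgrade(create_engine('mysql://user:pass@localhost/mvpn'))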
| gpl-2.0 |
FRidh/Sea | Sea/adapter/components/Component1DBeam.py | 2 | 2237 | """
Adapter class for :class:`Sea.model.components.Component1DBeam`
"""
import Sea
from ComponentStructural import ComponentStructural
from ..subsystems import SubsystemStructuralLong, SubsystemStructuralBend, SubsystemStructuralShear
class SubsystemLong(SubsystemStructuralLong, Sea.model.components.Component1DBeam.SubsystemLong):
pass
class SubsystemBend(SubsystemStructuralBend, Sea.model.components.Component1DBeam.SubsystemBend):
pass
class SubsystemShear(SubsystemStructuralShear, Sea.model.components.Component1DBeam.SubsystemShear):
pass
class Component1DBeam(ComponentStructural, Sea.model.components.Component1DBeam.Component1DBeam):
"""
Beam structural component.
This adapter describes a :class:`Sea.model.components.Component1DBeam`
"""
name = 'Beam'
description = 'A structural component with wave propagation along one dimension.'
def __init__(self, obj, material, part):
ComponentStructural.__init__(self, obj, material, part)
obj.addProperty("App::PropertyLength", "Length", "Beam", "Length of the beam")
obj.setEditorMode("MaxLength", 1)
obj.addProperty("App::PropertyFloat", "CrossSection", "Beam", "Cross section of the beam")
obj.setEditorMode("CrossSection", 1)
obj.addProperty("App::PropertyFloat", "MassPerArea", "Beam", "Mass per unit area")
obj.setEditorMode("MassPerArea", 1)
obj.addProperty("App::PropertyFloat", "AreaMoment", "Beam", "Area moment of inertia")
obj.setEditorMode("AreaMoment", 1)
self.SubsystemLong = obj.makeSubsystem(SubsystemLong())
self.SubsystemBend = obj.makeSubsystem(SubsystemBend())
self.SubsystemShear = obj.makeSubsystem(SubsystemShear())
def onChanged(self, obj, prop):
ComponentStructural.onChanged(self, obj, prop)
if prop == 'Length':
obj.Proxy.model.length = obj.Length
def execute(self, obj):
ComponentStructural.execute(self, obj)
obj.Length = obj.Proxy.model.length
obj.AreaMoment = obj.Proxy.model.area_moment_of_inertia
obj.CrossSection = obj.Proxy.model.cross_section
| bsd-3-clause |
dmsurti/mayavi | mayavi/modules/labels.py | 3 | 7916 | # Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
# Enthought library imports.
from traits.api import Int, Instance, Str, TraitError
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from apptools.persistence import state_pickler
# Local imports.
from mayavi.core.common import error
from mayavi.core.pipeline_base import PipelineBase
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.module import Module
from mayavi.filters.optional import Optional
from mayavi.filters.mask_points import MaskPoints
from mayavi.filters.user_defined import UserDefined
from mayavi.components.actor2d import Actor2D
from mayavi.core.common import handle_children_state
################################################################################
# `Labels` class.
################################################################################
class Labels(Module):
"""
Allows a user to label the current dataset or the current actor of
the active module.
"""
# Used for persistence.
__version__ = 0
# The object which we are labeling.
object = Instance(PipelineBase, record=False)
# The label format string.
label_format = Str('', enter_set=True, auto_set=False,
desc='the label format string')
# Number of points to label.
number_of_labels = Int(25, enter_set=True, auto_set=False,
desc='the number of points to label')
# The filter used for masking of the points.
mask = Instance(MaskPoints, record=True)
# Filter to select visible points.
visible_points = Instance(Optional, record=True)
# The 2D actor for the labels.
actor = Instance(Actor2D, record=True)
# The text property of the labels.
property = Instance(tvtk.TextProperty, record=True)
# The mapper for the labels.
mapper = Instance(tvtk.LabeledDataMapper, args=(), record=True)
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
########################################
# Private traits.
# The input used for the labeling.
input = Instance(PipelineBase)
# The id of the object in the modulemanager only used for
# persistence.
object_id = Int(-2)
########################################
# View related traits.
view = View(Group(Item(name='number_of_labels'),
Item(name='label_format'),
Item(name='mapper',
style='custom',
show_label=False,
resizable=True),
Item(name='mask',
style='custom',
resizable=True,
show_label=False),
label='Labels'
),
Group(
Item(name='visible_points',
style='custom',
resizable=True,
show_label=False),
label='VisiblePoints'
),
Group(Item(name='property',
style='custom',
show_label=False,
resizable=True),
label='TextProperty'
),
resizable=True
)
######################################################################
# `object` interface.
######################################################################
def __get_pure_state__(self):
self._compute_object_id()
d = super(Labels, self).__get_pure_state__()
for name in ('object', 'mapper', 'input'):
d.pop(name, None)
# Must pickle the components.
d['components'] = self.components
return d
def __set_pure_state__(self, state):
handle_children_state(self.components, state.components)
state_pickler.set_state(self, state)
self.update_pipeline()
######################################################################
# `Module` interface.
######################################################################
def setup_pipeline(self):
mask = MaskPoints()
mask.filter.set(generate_vertices=True, random_mode=True)
self.mask = mask
v = UserDefined(filter=tvtk.SelectVisiblePoints(),
name='VisiblePoints')
self.visible_points = Optional(filter=v, enabled=False)
mapper = tvtk.LabeledDataMapper()
self.mapper = mapper
self.actor = Actor2D(mapper=mapper)
self.property = mapper.label_text_property
self.property.on_trait_change(self.render)
self.components = [self.mask, self.visible_points, self.actor]
def update_pipeline(self):
mm = self.module_manager
if mm is None:
return
self._find_input() # Calculates self.input
self.mask.inputs = [self.input]
self.visible_points.inputs = [self.mask]
self.actor.inputs = [self.visible_points]
self._number_of_labels_changed(self.number_of_labels)
self._label_format_changed(self.label_format)
######################################################################
# Non-public interface.
######################################################################
def _find_input(self):
mm = self.module_manager
if self.object is None:
if self.object_id == -1:
self.input = mm.source
elif self.object_id > -1:
obj = mm.children[self.object_id]
if hasattr(obj, 'actor'):
self.set(object=obj, trait_change_notify=False)
self.input = obj.actor.inputs[0]
else:
self.input = mm.source
else:
o = self.object
if hasattr(o, 'module_manager'):
# A module.
if hasattr(o, 'actor'):
self.input = o.actor.inputs[0]
else:
self.input = o.module_manager.source
if self.input is None:
if self.object_id == -2:
self.input = mm.source
else:
error('No object to label!')
return
def _number_of_labels_changed(self, value):
if self.input is None:
return
f = self.mask.filter
inp = self.input.outputs[0]
if hasattr(inp, 'update'):
inp.update()
npts = inp.number_of_points
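        # Keep roughly `value` labeled points by masking the rest; e.g. with
        # 10000 input points and number_of_labels == 25 (hypothetical
        # figures), on_ratio below becomes 400, so every 400th point is kept.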
typ = type(f.on_ratio)
f.on_ratio = typ(max(npts/value, 1))
if self.mask.running:
f.update()
self.mask.data_changed = True
def _label_format_changed(self, value):
if len(value) > 0:
self.mapper.label_format = value
self.render()
else:
try:
self.mapper.label_format = None
except TraitError:
self.mapper.label_format = '%g'
self.render()
def _object_changed(self, value):
self.update_pipeline()
def _compute_object_id(self):
mm = self.module_manager
input = self.input
self.object_id = -2
if input is mm.source:
self.object_id = -1
return
for id, child in enumerate(mm.children):
if child is self.object:
self.object_id = id
return
def _scene_changed(self, old, new):
self.visible_points.filter.filter.renderer = new.renderer
super(Labels, self)._scene_changed(old, new)
| bsd-3-clause |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/redirects/middleware.py | 109 | 1926 | from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseGone, HttpResponsePermanentRedirect
from django.utils.deprecation import MiddlewareMixin
class RedirectFallbackMiddleware(MiddlewareMixin):
# Defined as class-level attributes to be subclassing-friendly.
response_gone_class = HttpResponseGone
response_redirect_class = HttpResponsePermanentRedirect
def __init__(self, get_response=None):
if not apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured(
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
super().__init__(get_response)
def process_response(self, request, response):
# No need to check for a redirect for non-404 responses.
if response.status_code != 404:
return response
full_path = request.get_full_path()
current_site = get_current_site(request)
r = None
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if r is None and settings.APPEND_SLASH and not request.path.endswith('/'):
try:
r = Redirect.objects.get(
site=current_site,
old_path=request.get_full_path(force_append_slash=True),
)
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == '':
return self.response_gone_class()
return self.response_redirect_class(r.new_path)
# No redirect was found. Return the response.
return response
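# A minimal sketch of enabling this middleware (hypothetical settings.py
# excerpt):
#
#   INSTALLED_APPS = [..., 'django.contrib.sites', 'django.contrib.redirects']
#   MIDDLEWARE = [
#       ...,
#       'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
#   ]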
| mit |
wdsgyj/google-breakpad-mirror | src/tools/gyp/test/lib/TestGyp.py | 112 | 39996 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import collections
import itertools
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import TestCmd
import TestCommon
from TestCommon import __all__
__all__.extend([
'TestGyp',
])
def remove_debug_line_numbers(contents):
"""Function to remove the line numbers from the debug output
  of gyp and thus remove the extreme fragility of the stdout
comparison tests.
"""
lines = contents.splitlines()
# split each line on ":"
lines = [l.split(":", 3) for l in lines]
# join each line back together while ignoring the
# 3rd column which is the line number
  lines = [":".join(l[3:]) if len(l) > 3 else ":".join(l) for l in lines]
return "\n".join(lines)
def match_modulo_line_numbers(contents_a, contents_b):
"""File contents matcher that ignores line numbers."""
contents_a = remove_debug_line_numbers(contents_a)
contents_b = remove_debug_line_numbers(contents_b)
return TestCommon.match_exact(contents_a, contents_b)
class TestGypBase(TestCommon.TestCommon):
"""
Class for controlling end-to-end tests of gyp generators.
Instantiating this class will create a temporary directory and
arrange for its destruction (via the TestCmd superclass) and
copy all of the non-gyptest files in the directory hierarchy of the
executing script.
The default behavior is to test the 'gyp' or 'gyp.bat' file in the
current directory. An alternative may be specified explicitly on
instantiation, or by setting the TESTGYP_GYP environment variable.
This class should be subclassed for each supported gyp generator
(format). Various abstract methods below define calling signatures
used by the test scripts to invoke builds on the generated build
configuration and to run executables generated by those builds.
"""
build_tool = None
build_tool_list = []
_exe = TestCommon.exe_suffix
_obj = TestCommon.obj_suffix
shobj_ = TestCommon.shobj_prefix
_shobj = TestCommon.shobj_suffix
lib_ = TestCommon.lib_prefix
_lib = TestCommon.lib_suffix
dll_ = TestCommon.dll_prefix
_dll = TestCommon.dll_suffix
# Constants to represent different targets.
ALL = '__all__'
DEFAULT = '__default__'
# Constants for different target types.
EXECUTABLE = '__executable__'
STATIC_LIB = '__static_lib__'
SHARED_LIB = '__shared_lib__'
def __init__(self, gyp=None, *args, **kw):
self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
self.extra_args = sys.argv[1:]
if not gyp:
gyp = os.environ.get('TESTGYP_GYP')
if not gyp:
if sys.platform == 'win32':
gyp = 'gyp.bat'
else:
gyp = 'gyp'
self.gyp = os.path.abspath(gyp)
self.no_parallel = False
self.initialize_build_tool()
kw.setdefault('match', TestCommon.match_exact)
# Put test output in out/testworkarea by default.
# Use temporary names so there are no collisions.
workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
# Create work area if it doesn't already exist.
if not os.path.isdir(workdir):
os.makedirs(workdir)
kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)
formats = kw.pop('formats', [])
super(TestGypBase, self).__init__(*args, **kw)
excluded_formats = set([f for f in formats if f[0] == '!'])
included_formats = set(formats) - excluded_formats
if ('!'+self.format in excluded_formats or
included_formats and self.format not in included_formats):
msg = 'Invalid test for %r format; skipping test.\n'
self.skip_test(msg % self.format)
self.copy_test_configuration(self.origin_cwd, self.workdir)
self.set_configuration(None)
# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = self.workpath()
# Clear $GYP_DEFINES for the same reason.
if 'GYP_DEFINES' in os.environ:
del os.environ['GYP_DEFINES']
def built_file_must_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name does not exist.
"""
return self.must_exist(self.built_file_path(name, type, **kw))
def built_file_must_not_exist(self, name, type=None, **kw):
"""
Fails the test if the specified built file name exists.
"""
return self.must_not_exist(self.built_file_path(name, type, **kw))
def built_file_must_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
do not match the specified contents.
"""
return self.must_match(self.built_file_path(name, **kw), contents)
def built_file_must_not_match(self, name, contents, **kw):
"""
Fails the test if the contents of the specified built file name
match the specified contents.
"""
return self.must_not_match(self.built_file_path(name, **kw), contents)
def built_file_must_not_contain(self, name, contents, **kw):
"""
Fails the test if the specified built file name contains the specified
contents.
"""
return self.must_not_contain(self.built_file_path(name, **kw), contents)
def copy_test_configuration(self, source_dir, dest_dir):
"""
Copies the test configuration from the specified source_dir
(the directory in which the test script lives) to the
specified dest_dir (a temporary working directory).
This ignores all files and directories that begin with
the string 'gyptest', and all '.svn' subdirectories.
"""
for root, dirs, files in os.walk(source_dir):
if '.svn' in dirs:
dirs.remove('.svn')
      dirs[:] = [ d for d in dirs if not d.startswith('gyptest') ]
files = [ f for f in files if not f.startswith('gyptest') ]
for dirname in dirs:
source = os.path.join(root, dirname)
destination = source.replace(source_dir, dest_dir)
os.mkdir(destination)
if sys.platform != 'win32':
shutil.copystat(source, destination)
for filename in files:
source = os.path.join(root, filename)
destination = source.replace(source_dir, dest_dir)
shutil.copy2(source, destination)
def initialize_build_tool(self):
"""
Initializes the .build_tool attribute.
Searches the .build_tool_list for an executable name on the user's
$PATH. The first tool on the list is used as-is if nothing is found
on the current $PATH.
"""
for build_tool in self.build_tool_list:
if not build_tool:
continue
if os.path.isabs(build_tool):
self.build_tool = build_tool
return
build_tool = self.where_is(build_tool)
if build_tool:
self.build_tool = build_tool
return
if self.build_tool_list:
self.build_tool = self.build_tool_list[0]
def relocate(self, source, destination):
"""
Renames (relocates) the specified source (usually a directory)
to the specified destination, creating the destination directory
first if necessary.
Note: Don't use this as a generic "rename" operation. In the
future, "relocating" parts of a GYP tree may affect the state of
the test to modify the behavior of later method calls.
"""
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
self.subdir(destination_dir)
os.rename(source, destination)
def report_not_up_to_date(self):
"""
Reports that a build is not up-to-date.
This provides common reporting for formats that have complicated
conditions for checking whether a build is up-to-date. Formats
that expect exact output from the command (make) can
just set stdout= when they call the run_build() method.
"""
print "Build is not up-to-date:"
print self.banner('STDOUT ')
print self.stdout()
stderr = self.stderr()
if stderr:
print self.banner('STDERR ')
print stderr
def run_gyp(self, gyp_file, *args, **kw):
"""
Runs gyp against the specified gyp_file with the specified args.
"""
# When running gyp, and comparing its output we use a comparitor
# that ignores the line numbers that gyp logs in its debug output.
if kw.pop('ignore_line_numbers', False):
kw.setdefault('match', match_modulo_line_numbers)
# TODO: --depth=. works around Chromium-specific tree climbing.
depth = kw.pop('depth', '.')
run_args = ['--depth='+depth, '--format='+self.format, gyp_file]
if self.no_parallel:
run_args += ['--no-parallel']
run_args.extend(self.extra_args)
run_args.extend(args)
return self.run(program=self.gyp, arguments=run_args, **kw)
def run(self, *args, **kw):
"""
Executes a program by calling the superclass .run() method.
This exists to provide a common place to filter out keyword
arguments implemented in this layer, without having to update
the tool-specific subclasses or clutter the tests themselves
with platform-specific code.
"""
if kw.has_key('SYMROOT'):
del kw['SYMROOT']
super(TestGypBase, self).run(*args, **kw)
def set_configuration(self, configuration):
"""
Sets the configuration, to be used for invoking the build
tool and testing potential built output.
"""
self.configuration = configuration
def configuration_dirname(self):
if self.configuration:
return self.configuration.split('|')[0]
else:
return 'Default'
def configuration_buildname(self):
if self.configuration:
return self.configuration
else:
return 'Default'
#
# Abstract methods to be defined by format-specific subclasses.
#
def build(self, gyp_file, target=None, **kw):
"""
Runs a build of the specified target against the configuration
generated from the specified gyp_file.
A 'target' argument of None or the special value TestGyp.DEFAULT
specifies the default argument for the underlying build tool.
A 'target' argument of TestGyp.ALL specifies the 'all' target
(if any) of the underlying build tool.
"""
raise NotImplementedError
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type.
"""
raise NotImplementedError
def built_file_basename(self, name, type=None, **kw):
"""
Returns the base name of the specified file name, of the specified type.
A bare=True keyword argument specifies that prefixes and suffixes shouldn't
be applied.
"""
if not kw.get('bare'):
if type == self.EXECUTABLE:
name = name + self._exe
elif type == self.STATIC_LIB:
name = self.lib_ + name + self._lib
elif type == self.SHARED_LIB:
name = self.dll_ + name + self._dll
return name
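    # For example (hypothetical name, on a Linux host): type EXECUTABLE
    # leaves 'foo' as 'foo', STATIC_LIB yields 'libfoo.a', and SHARED_LIB
    # yields 'libfoo.so', per the TestCommon prefixes/suffixes above.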
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable program built from a gyp-generated configuration.
The specified name should be independent of any particular generator.
Subclasses should find the output executable in the appropriate
output build directory, tack on any necessary executable suffix, etc.
"""
raise NotImplementedError
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified target is up to date.
The subclass should implement this by calling build()
(or a reasonable equivalent), checking whatever conditions
will tell it the build was an "up to date" null build, and
failing if it isn't.
"""
raise NotImplementedError
class TestGypGypd(TestGypBase):
"""
Subclass for testing the GYP 'gypd' generator (spit out the
internal data structure as pretty-printed Python).
"""
format = 'gypd'
def __init__(self, gyp=None, *args, **kw):
super(TestGypGypd, self).__init__(*args, **kw)
# gypd implies the use of 'golden' files, so parallelizing conflicts as it
# causes ordering changes.
self.no_parallel = True
class TestGypCustom(TestGypBase):
"""
Subclass for testing the GYP with custom generator
"""
def __init__(self, gyp=None, *args, **kw):
self.format = kw.pop("format")
super(TestGypCustom, self).__init__(*args, **kw)
class TestGypAndroid(TestGypBase):
"""
Subclass for testing the GYP Android makefile generator. Note that
build/envsetup.sh and lunch must have been run before running tests.
TODO: This is currently an incomplete implementation. We do not support
run_built_executable(), so we pass only tests which do not use this. As a
result, support for host targets is not properly tested.
"""
format = 'android'
# Note that we can't use mmm as the build tool because ...
# - it builds all targets, whereas we need to pass a target
# - it is a function, whereas the test runner assumes the build tool is a file
# Instead we use make and duplicate the logic from mmm.
build_tool_list = ['make']
# We use our custom target 'gyp_all_modules', as opposed to the 'all_modules'
# target used by mmm, to build only those targets which are part of the gyp
# target 'all'.
ALL = 'gyp_all_modules'
def __init__(self, gyp=None, *args, **kw):
# Android requires build and test output to be inside its source tree.
# We use the following working directory for the test's source, but the
# test's build output still goes to $ANDROID_PRODUCT_OUT.
# Note that some tests explicitly set format='gypd' to invoke the gypd
# backend. This writes to the source tree, but there's no way around this.
kw['workdir'] = os.path.join('/tmp', 'gyptest',
kw.get('workdir', 'testworkarea'))
    # We need to remove all gyp outputs from out/. This is because some tests
# don't have rules to regenerate output, so they will simply re-use stale
# output if present. Since the test working directory gets regenerated for
# each test run, this can confuse things.
# We don't have a list of build outputs because we don't know which
# dependent targets were built. Instead we delete all gyp-generated output.
# This may be excessive, but should be safe.
out_dir = os.environ['ANDROID_PRODUCT_OUT']
obj_dir = os.path.join(out_dir, 'obj')
shutil.rmtree(os.path.join(obj_dir, 'GYP'), ignore_errors = True)
for x in ['EXECUTABLES', 'STATIC_LIBRARIES', 'SHARED_LIBRARIES']:
for d in os.listdir(os.path.join(obj_dir, x)):
if d.endswith('_gyp_intermediates'):
shutil.rmtree(os.path.join(obj_dir, x, d), ignore_errors = True)
for x in [os.path.join('obj', 'lib'), os.path.join('system', 'lib')]:
for d in os.listdir(os.path.join(out_dir, x)):
if d.endswith('_gyp.so'):
os.remove(os.path.join(out_dir, x, d))
super(TestGypAndroid, self).__init__(*args, **kw)
def target_name(self, target):
if target == self.ALL:
return self.ALL
# The default target is 'droid'. However, we want to use our special target
# to build only the gyp target 'all'.
if target in (None, self.DEFAULT):
return self.ALL
return target
def build(self, gyp_file, target=None, **kw):
"""
Runs a build using the Android makefiles generated from the specified
gyp_file. This logic is taken from Android's mmm.
"""
arguments = kw.get('arguments', [])[:]
arguments.append(self.target_name(target))
arguments.append('-C')
arguments.append(os.environ['ANDROID_BUILD_TOP'])
kw['arguments'] = arguments
chdir = kw.get('chdir', '')
makefile = os.path.join(self.workdir, chdir, 'GypAndroid.mk')
os.environ['ONE_SHOT_MAKEFILE'] = makefile
result = self.run(program=self.build_tool, **kw)
del os.environ['ONE_SHOT_MAKEFILE']
return result
def android_module(self, group, name, subdir):
if subdir:
name = '%s_%s' % (subdir, name)
if group == 'SHARED_LIBRARIES':
name = 'lib_%s' % name
return '%s_gyp' % name
def intermediates_dir(self, group, module_name):
return os.path.join(os.environ['ANDROID_PRODUCT_OUT'], 'obj', group,
'%s_intermediates' % module_name)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Android. Note that we don't support the configuration
parameter.
"""
# Built files are in $ANDROID_PRODUCT_OUT. This requires copying logic from
# the Android build system.
if type == None:
return os.path.join(os.environ['ANDROID_PRODUCT_OUT'], 'obj', 'GYP',
'shared_intermediates', name)
subdir = kw.get('subdir')
if type == self.EXECUTABLE:
# We don't install executables
group = 'EXECUTABLES'
module_name = self.android_module(group, name, subdir)
return os.path.join(self.intermediates_dir(group, module_name), name)
if type == self.STATIC_LIB:
group = 'STATIC_LIBRARIES'
module_name = self.android_module(group, name, subdir)
return os.path.join(self.intermediates_dir(group, module_name),
'%s.a' % module_name)
if type == self.SHARED_LIB:
group = 'SHARED_LIBRARIES'
module_name = self.android_module(group, name, subdir)
return os.path.join(self.intermediates_dir(group, module_name), 'LINKED',
'%s.so' % module_name)
assert False, 'Unhandled type'
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable program built from a gyp-generated configuration.
This is not correctly implemented for Android. For now, we simply check
that the executable file exists.
"""
# Running executables requires a device. Even if we build for target x86,
# the binary is not built with the correct toolchain options to actually
# run on the host.
# Copied from TestCommon.run()
match = kw.pop('match', self.match)
status = None
if os.path.exists(self.built_file_path(name)):
status = 1
self._complete(None, None, None, None, status, match)
def match_single_line(self, lines = None, expected_line = None):
"""
Checks that specified line appears in the text.
"""
for line in lines.split('\n'):
if line == expected_line:
return 1
return
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified target is up to date.
"""
kw['stdout'] = ("make: Nothing to be done for `%s'." %
self.target_name(target))
# We need to supply a custom matcher, since we don't want to depend on the
# exact stdout string.
kw['match'] = self.match_single_line
return self.build(gyp_file, target, **kw)
class TestGypCMake(TestGypBase):
"""
Subclass for testing the GYP CMake generator, using cmake's ninja backend.
"""
format = 'cmake'
build_tool_list = ['cmake']
ALL = 'all'
def cmake_build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
self.build_tool_list = ['cmake']
self.initialize_build_tool()
chdir = os.path.join(kw.get('chdir', '.'),
'out',
self.configuration_dirname())
kw['chdir'] = chdir
arguments.append('-G')
arguments.append('Ninja')
kw['arguments'] = arguments
stderr = kw.get('stderr', None)
if stderr:
kw['stderr'] = stderr.split('$$$')[0]
self.run(program=self.build_tool, **kw)
def ninja_build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
self.build_tool_list = ['ninja']
self.initialize_build_tool()
# Add a -C output/path to the command line.
arguments.append('-C')
arguments.append(os.path.join('out', self.configuration_dirname()))
if target not in (None, self.DEFAULT):
arguments.append(target)
kw['arguments'] = arguments
stderr = kw.get('stderr', None)
if stderr:
stderrs = stderr.split('$$$')
kw['stderr'] = stderrs[1] if len(stderrs) > 1 else ''
return self.run(program=self.build_tool, **kw)
def build(self, gyp_file, target=None, status=0, **kw):
# Two tools must be run to build: first cmake, then ninja.
# Allow cmake to succeed when the overall expectation is to fail.
if status is None:
kw['status'] = None
else:
if not isinstance(status, collections.Iterable): status = (status,)
kw['status'] = list(itertools.chain((0,), status))
self.cmake_build(gyp_file, target, **kw)
kw['status'] = status
self.ninja_build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
if sys.platform == 'darwin':
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append('out')
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
if sys.platform != 'darwin':
result.append('obj.target')
elif type == self.SHARED_LIB:
if sys.platform != 'darwin' and sys.platform != 'win32':
result.append('lib.target')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def up_to_date(self, gyp_file, target=None, **kw):
result = self.ninja_build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
if 'ninja: no work to do' not in stdout:
self.report_not_up_to_date()
self.fail_test()
return result
class TestGypMake(TestGypBase):
"""
Subclass for testing the GYP Make generator.
"""
format = 'make'
build_tool_list = ['make']
ALL = 'all'
def build(self, gyp_file, target=None, **kw):
"""
Runs a Make build using the Makefiles generated from the specified
gyp_file.
"""
arguments = kw.get('arguments', [])[:]
if self.configuration:
arguments.append('BUILDTYPE=' + self.configuration)
if target not in (None, self.DEFAULT):
arguments.append(target)
# Sub-directory builds provide per-gyp Makefiles (i.e.
# Makefile.gyp_filename), so use that if there is no Makefile.
chdir = kw.get('chdir', '')
if not os.path.exists(os.path.join(chdir, 'Makefile')):
print "NO Makefile in " + os.path.join(chdir, 'Makefile')
arguments.insert(0, '-f')
arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Make target is up to date.
"""
if target in (None, self.DEFAULT):
message_target = 'all'
else:
message_target = target
kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
return self.build(gyp_file, target, **kw)
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Make.
"""
configuration = self.configuration_dirname()
libdir = os.path.join('out', configuration, 'lib')
# TODO(piman): when everything is cross-compile safe, remove lib.target
if sys.platform == 'darwin':
# Mac puts target shared libraries right in the product directory.
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = (
libdir + '.host:' + os.path.join('out', configuration))
else:
os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Make.
Built files are in the subdirectory 'out/{configuration}'.
The default is 'out/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
A subdir= keyword argument specifies a library subdirectory within
the default 'obj.target'.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['out', configuration])
if type == self.STATIC_LIB and sys.platform != 'darwin':
result.append('obj.target')
elif type == self.SHARED_LIB and sys.platform != 'darwin':
result.append('lib.target')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def FindVisualStudioInstallation():
"""Returns appropriate values for .build_tool and .uses_msbuild fields
of TestGypBase for Visual Studio.
We use the value specified by GYP_MSVS_VERSION. If not specified, we
search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
Failing that, we search for likely deployment paths.
"""
possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
for drive in range(ord('C'), ord('Z') + 1)
for suffix in ['', ' (x86)']]
possible_paths = {
'2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
'2012': r'Microsoft Visual Studio 11.0\Common7\IDE\devenv.com',
'2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
'2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
'2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
possible_roots = [ConvertToCygpath(r) for r in possible_roots]
msvs_version = 'auto'
for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
msvs_version = flag.split('=')[-1]
msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)
build_tool = None
if msvs_version in possible_paths:
# Check that the path to the specified GYP_MSVS_VERSION exists.
path = possible_paths[msvs_version]
for r in possible_roots:
bt = os.path.join(r, path)
if os.path.exists(bt):
build_tool = bt
uses_msbuild = msvs_version >= '2010'
return build_tool, uses_msbuild
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding "%s" was not found.' % (msvs_version, path))
if build_tool:
# We found 'devenv' on the path, use that and try to guess the version.
for version, path in possible_paths.iteritems():
if build_tool.find(path) >= 0:
uses_msbuild = version >= '2010'
return build_tool, uses_msbuild
else:
# If not, assume not MSBuild.
uses_msbuild = False
return build_tool, uses_msbuild
# Neither GYP_MSVS_VERSION nor the path helps us out. Iterate through
# the choices looking for a match.
for version in sorted(possible_paths, reverse=True):
path = possible_paths[version]
for r in possible_roots:
bt = os.path.join(r, path)
if os.path.exists(bt):
build_tool = bt
# msvs_version may be 'auto' here; compare the version actually matched.
uses_msbuild = version >= '2010'
return build_tool, uses_msbuild
print 'Error: could not find devenv'
sys.exit(1)
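# Hedged usage sketch (added for illustration): pinning the Visual Studio
# version through the environment, per the contract documented above.
# The '2010' value is just an example.
#
#   os.environ['GYP_MSVS_VERSION'] = '2010'
#   build_tool, uses_msbuild = FindVisualStudioInstallation()
#   assert uses_msbuild  # 2010 and later drive builds through MSBuild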
class TestGypOnMSToolchain(TestGypBase):
"""
Common subclass for testing generators that target the Microsoft Visual
Studio toolchain (cl, link, dumpbin, etc.)
"""
@staticmethod
def _ComputeVsvarsPath(devenv_path):
# devenv.com lives in <VS>\Common7\IDE; vsvars32.bat in <VS>\Common7\Tools.
devenv_dir = os.path.split(devenv_path)[0]
vsvars_path = os.path.join(devenv_dir, '..', 'Tools', 'vsvars32.bat')
return vsvars_path
def initialize_build_tool(self):
super(TestGypOnMSToolchain, self).initialize_build_tool()
if sys.platform in ('win32', 'cygwin'):
self.devenv_path, self.uses_msbuild = FindVisualStudioInstallation()
self.vsvars_path = TestGypOnMSToolchain._ComputeVsvarsPath(
self.devenv_path)
def run_dumpbin(self, *dumpbin_args):
"""Run the dumpbin tool with the specified arguments, and capturing and
returning stdout."""
assert sys.platform in ('win32', 'cygwin')
cmd = os.environ.get('COMSPEC', 'cmd.exe')
arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
arguments.extend(dumpbin_args)
proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
output = proc.communicate()[0]
assert not proc.returncode
return output
class TestGypNinja(TestGypOnMSToolchain):
"""
Subclass for testing the GYP Ninja generator.
"""
format = 'ninja'
build_tool_list = ['ninja']
ALL = 'all'
DEFAULT = 'all'
def run_gyp(self, gyp_file, *args, **kw):
TestGypBase.run_gyp(self, gyp_file, *args, **kw)
def build(self, gyp_file, target=None, **kw):
arguments = kw.get('arguments', [])[:]
# Add a -C output/path to the command line.
arguments.append('-C')
arguments.append(os.path.join('out', self.configuration_dirname()))
if target is None:
target = 'all'
arguments.append(target)
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def run_built_executable(self, name, *args, **kw):
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
if sys.platform == 'darwin':
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append('out')
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
if sys.platform != 'darwin':
result.append('obj')
elif type == self.SHARED_LIB:
if sys.platform != 'darwin' and sys.platform != 'win32':
result.append('lib')
subdir = kw.get('subdir')
if subdir and type != self.SHARED_LIB:
result.append(subdir)
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
def up_to_date(self, gyp_file, target=None, **kw):
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
if 'ninja: no work to do' not in stdout:
self.report_not_up_to_date()
self.fail_test()
return result
class TestGypMSVS(TestGypOnMSToolchain):
"""
Subclass for testing the GYP Visual Studio generator.
"""
format = 'msvs'
u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
up_to_date_re = re.compile(u, re.M)
# Initial None element will indicate to our .initialize_build_tool()
# method below that 'devenv' was not found on %PATH%.
#
# Note: we must use devenv.com to be able to capture build output.
# Directly executing devenv.exe only sends output to BuildLog.htm.
build_tool_list = [None, 'devenv.com']
def initialize_build_tool(self):
super(TestGypMSVS, self).initialize_build_tool()
self.build_tool = self.devenv_path
def build(self, gyp_file, target=None, rebuild=False, clean=False, **kw):
"""
Runs a Visual Studio build using the configuration generated
from the specified gyp_file.
"""
configuration = self.configuration_buildname()
if clean:
build = '/Clean'
elif rebuild:
build = '/Rebuild'
else:
build = '/Build'
arguments = kw.get('arguments', [])[:]
arguments.extend([gyp_file.replace('.gyp', '.sln'),
build, configuration])
# Note: the Visual Studio generator doesn't add an explicit 'all'
# target, so we just treat it the same as the default.
if target not in (None, self.ALL, self.DEFAULT):
arguments.extend(['/Project', target])
if self.configuration:
arguments.extend(['/ProjectConfig', self.configuration])
kw['arguments'] = arguments
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Visual Studio target is up to date.
Beware that VS2010 will behave strangely if you build under
C:\USERS\yourname\AppData\Local. It will cause needless work. The output
will be "1 succeeded and 0 up to date". MSBuild tracing reveals that:
"Project 'C:\Users\...\AppData\Local\...vcxproj' not up to date because
'C:\PROGRAM FILES (X86)\MICROSOFT VISUAL STUDIO 10.0\VC\BIN\1033\CLUI.DLL'
was modified at 02/21/2011 17:03:30, which is newer than '' which was
modified at 01/01/0001 00:00:00.
The workaround is to specify a workdir when instantiating the test, e.g.
test = TestGyp.TestGyp(workdir='workarea')
"""
result = self.build(gyp_file, target, **kw)
if not result:
stdout = self.stdout()
m = self.up_to_date_re.search(stdout)
up_to_date = m and int(m.group(1)) > 0
if not up_to_date:
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by Visual Studio.
"""
configuration = self.configuration_dirname()
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Visual Studio.
Built files are in a subdirectory that matches the configuration
name. The default is 'Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
result.append(self.configuration_dirname())
if type == self.STATIC_LIB:
result.append('lib')
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
class TestGypXcode(TestGypBase):
"""
Subclass for testing the GYP Xcode generator.
"""
format = 'xcode'
build_tool_list = ['xcodebuild']
phase_script_execution = ("\n"
"PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
" cd /\\S+\n"
" /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
"(make: Nothing to be done for `all'\\.\n)?")
strip_up_to_date_expressions = [
# Various actions or rules can run even when the overall build target
# is up to date. Strip those phases' GYP-generated output.
re.compile(phase_script_execution, re.S),
# The message from distcc_pump can trail the "BUILD SUCCEEDED"
# message, so strip that, too.
re.compile('__________Shutting down distcc-pump include server\n', re.S),
]
up_to_date_endings = (
'Checking Dependencies...\n** BUILD SUCCEEDED **\n', # Xcode 3.0/3.1
'Check dependencies\n** BUILD SUCCEEDED **\n\n', # Xcode 3.2
'Check dependencies\n\n\n** BUILD SUCCEEDED **\n\n', # Xcode 4.2
'Check dependencies\n\n** BUILD SUCCEEDED **\n\n', # Xcode 5.0
)
def build(self, gyp_file, target=None, **kw):
"""
Runs an xcodebuild using the .xcodeproj generated from the specified
gyp_file.
"""
# Be sure we're working with a copy of 'arguments' since we modify it.
# The caller may not be expecting it to be modified.
arguments = kw.get('arguments', [])[:]
arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
if target == self.ALL:
arguments.append('-alltargets')
elif target not in (None, self.DEFAULT):
arguments.extend(['-target', target])
if self.configuration:
arguments.extend(['-configuration', self.configuration])
symroot = kw.get('SYMROOT', '$SRCROOT/build')
if symroot:
arguments.append('SYMROOT='+symroot)
kw['arguments'] = arguments
# Work around spurious stderr output from Xcode 4, http://crbug.com/181012
match = kw.pop('match', self.match)
def match_filter_xcode(actual, expected):
if actual:
if not TestCmd.is_List(actual):
actual = actual.split('\n')
if not TestCmd.is_List(expected):
expected = expected.split('\n')
actual = [a for a in actual
if 'No recorder, buildTask: <Xcode3BuildTask:' not in a]
return match(actual, expected)
kw['match'] = match_filter_xcode
return self.run(program=self.build_tool, **kw)
def up_to_date(self, gyp_file, target=None, **kw):
"""
Verifies that a build of the specified Xcode target is up to date.
"""
result = self.build(gyp_file, target, **kw)
if not result:
output = self.stdout()
for expression in self.strip_up_to_date_expressions:
output = expression.sub('', output)
if not output.endswith(self.up_to_date_endings):
self.report_not_up_to_date()
self.fail_test()
return result
def run_built_executable(self, name, *args, **kw):
"""
Runs an executable built by xcodebuild.
"""
configuration = self.configuration_dirname()
os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
# Enclosing the name in a list avoids prepending the original dir.
program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
return self.run(program=program, *args, **kw)
def built_file_path(self, name, type=None, **kw):
"""
Returns a path to the specified file name, of the specified type,
as built by Xcode.
Built files are in the subdirectory 'build/{configuration}'.
The default is 'build/Default'.
A chdir= keyword argument specifies the source directory
relative to which the output subdirectory can be found.
"type" values of STATIC_LIB or SHARED_LIB append the necessary
prefixes and suffixes to a platform-independent library base name.
"""
result = []
chdir = kw.get('chdir')
if chdir:
result.append(chdir)
configuration = self.configuration_dirname()
result.extend(['build', configuration])
result.append(self.built_file_basename(name, type, **kw))
return self.workpath(*result)
format_class_list = [
TestGypGypd,
TestGypAndroid,
TestGypCMake,
TestGypMake,
TestGypMSVS,
TestGypNinja,
TestGypXcode,
]
def TestGyp(*args, **kw):
"""
Returns an appropriate TestGyp* instance for a specified GYP format.
"""
format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
for format_class in format_class_list:
if format == format_class.format:
return format_class(*args, **kw)
raise Exception, "unknown format %r" % format
| bsd-3-clause |
smok-serwis/bunia | bunia/output/base.py | 1 | 1698 | import base64
def transcode_output(data, form, dtype='binary'):
"""
Transcode data to the target form
:param data: the data to transcode: bytes if dtype is 'binary', text if 'text'
:param form: one of 'text', 'html', 'raw', 'base64', 'ascii'
:param dtype: one of 'binary' or 'text', describing what `data` is
"""
if form not in ('text', 'html', 'raw', 'base64', 'ascii'):
raise ValueError('Invalid form')
if dtype not in ('binary', 'text'):
raise ValueError('Invalid dtype')
if form in ('text', 'html'):
if dtype == 'text':
return data
else:
return data.decode('utf-8')
elif form == 'raw':
if dtype == 'text':
return data.encode('utf-8')
else:
return data
elif form == 'base64':
if dtype == 'text':
data = data.encode('utf-8')
return base64.b64encode(data).decode('utf-8')
elif form == 'ascii':
if dtype == 'binary':
data = data.decode('ascii')
return data
else:
raise ValueError('Invalid form!')
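# Hedged examples (added for illustration; not part of the original module).
# Each assertion follows directly from the mapping documented above.
def _transcode_output_examples():
    assert transcode_output(b'abc', 'text', 'binary') == u'abc'
    assert transcode_output(u'abc', 'raw', 'text') == b'abc'
    assert transcode_output(b'abc', 'base64', 'binary') == u'YWJj'
    assert transcode_output(b'abc', 'ascii', 'binary') == u'abc'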
class Output(object):
"""
Access to some form of output. Override and extend as needed. This is constructed
by a Runner in response to a command request.
:ivar name: (tp.Optional[str]) - output name
"""
def __init__(self, name=None):
self.name = name
def to(self, form='text'):
"""Output content in some particular form.
It is mandatory to support 'text'.
Possible values are 'text', 'html'. This is the last call to this objects,
as Output can close its resources.
:raises ValueError: form not supported
:return: depends on form. unicode for text and html
"""
| mit |
nervenXC/topical_word_embeddings | TWE-2/last_last_step.py | 3 | 1257 | #!/usr/bin/env python2
#-*- coding: UTF-8 -*-
#File:
#Date:
#Author: Yang Liu <[email protected]>
#Description:
if __name__=="__main__":
with open("test.log","w") as logout, open("result.out") as f, open("result.out.out","w") as fout, open("log.txt") as log:
#log loading
content2id = {}
id2word = {}
for l in log:
word, word_number, topic_number, content_id, _ = l.strip().split()
word_number = int(word_number)
topic_number = int(topic_number)
content_id = int(content_id)
content2id[(word_number, topic_number)] = content_id
id2word[word_number] = word
print "LOADING COMPLETED"
for (line_num, l) in enumerate(f):
word1, topic1, word2, topic2, score = l.strip().split()
word1 = int(word1)
topic1 = int(topic1)
word2 = int(word2)
topic2 = int(topic2)
try:
content1 = content2id[(word1, topic1)]
content2 = content2id[(word2, topic2)]
except KeyError:
print line_num
continue
print >> fout, content1, content2, score
print >>logout, id2word[word1], id2word[word2]
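# Hedged note (added for illustration; values are hypothetical): each log.txt
# line is five whitespace-separated fields, e.g.
#   apple 12 3 4711 x
# which the loading loop maps to content2id[(12, 3)] = 4711 and
# id2word[12] = 'apple'; result.out rows are then rewritten as
# 'content1 content2 score' triples.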
| mit |
bhargav2408/python-for-android | python-build/python-libs/gdata/src/gdata/books/service.py | 136 | 11339 | #!/usr/bin/python
"""
Extend gdata.service.GDataService to support authenticated CRUD ops on
Books API
http://code.google.com/apis/books/docs/getting-started.html
http://code.google.com/apis/books/docs/gdata/developers_guide_protocol.html
TODO: (here and __init__)
* search based on label, review, or other annotations (possible?)
* edits (specifically, Put requests) seem to fail to effect a change
Problems With API:
* Adding a book with a review to the library adds a note, not a review.
This does not get included in the returned item. You see this by
looking at My Library through the website.
* Editing a review never edits a review (unless it is freshly added, but
see above). More generally,
* a Put request with changed annotations (label/rating/review) does NOT
change the data. Note: Put requests only work on the href from
GetEditLink (as per the spec). Do not try to PUT to the annotate or
library feeds, this will cause a 400 Invalid URI Bad Request response.
Attempting to Post to one of the feeds with the updated annotations
does not update them. See the following for (hopefully) a follow up:
google.com/support/forum/p/booksearch-apis/thread?tid=27fd7f68de438fc8
* Attempts to workaround the edit problem continue to fail. For example,
removing the item, editing the data, readding the item, gives us only
our originally added data (annotations). This occurs even if we
completely shut python down, refetch the book from the public feed,
and re-add it. There is some kind of persistence going on that I
cannot change. This is likely due to the annotations being cached in
the annotation feed and the inability to edit (see Put, above)
* GetAnnotationLink has www.books.... as the server, but hitting www...
results in a bad URI error.
* Spec indicates there may be multiple labels, but there does not seem
to be a way to get the server to accept multiple labels, nor does the
web interface have an obvious way to have multiple labels. Multiple
labels are never returned.
"""
__author__ = "James Sams <[email protected]>"
__copyright__ = "Apache License v2.0"
from shlex import split
import gdata.service
try:
import books
except ImportError:
import gdata.books as books
BOOK_SERVER = "books.google.com"
GENERAL_FEED = "/books/feeds/volumes"
ITEM_FEED = "/books/feeds/volumes/"
LIBRARY_FEED = "/books/feeds/users/%s/collections/library/volumes"
ANNOTATION_FEED = "/books/feeds/users/%s/volumes"
PARTNER_FEED = "/books/feeds/p/%s/volumes"
BOOK_SERVICE = "print"
ACCOUNT_TYPE = "HOSTED_OR_GOOGLE"
class BookService(gdata.service.GDataService):
def __init__(self, email=None, password=None, source=None,
server=BOOK_SERVER, account_type=ACCOUNT_TYPE,
exception_handlers=tuple(), **kwargs):
"""source should be of form 'ProgramCompany - ProgramName - Version'"""
gdata.service.GDataService.__init__(self, email=email,
password=password, service=BOOK_SERVICE, source=source,
server=server, **kwargs)
self.exception_handlers = exception_handlers
def search(self, q, start_index="1", max_results="10",
min_viewability="none", feed=GENERAL_FEED,
converter=books.BookFeed.FromString):
"""
Query the Public search feed. q is either a search string or a
gdata.service.Query instance with a query set.
min_viewability must be "none", "partial", or "full".
If you change the feed to a single item feed, note that you will
probably need to change the converter to be Book.FromString
"""
if not isinstance(q, gdata.service.Query):
q = gdata.service.Query(text_query=q)
if feed:
q.feed = feed
q['start-index'] = start_index
q['max-results'] = max_results
q['min-viewability'] = min_viewability
return self.Get(uri=q.ToUri(),converter=converter)
def search_by_keyword(self, q='', feed=GENERAL_FEED, start_index="1",
max_results="10", min_viewability="none", **kwargs):
"""
Query the Public Search Feed by keyword. Non-keyword strings can be
set in q. This is quite fragile. Is there a function somewhere in
the Google library that will parse a query the same way that Google
does?
Legal Identifiers are listed below and correspond to their meaning
at http://books.google.com/advanced_book_search:
all_words
exact_phrase
at_least_one
without_words
title
author
publisher
subject
isbn
lccn
oclc
seemingly unsupported:
publication_date: a sequence of two two-tuples:
((min_month,min_year),(max_month,max_year))
where month is one/two digit month, year is 4 digit, eg:
(('1','2000'),('10','2003')). Lower bound is inclusive,
upper bound is exclusive
"""
for k, v in kwargs.items():
if not v:
continue
k = k.lower()
if k == 'all_words':
q = "%s %s" % (q, v)
elif k == 'exact_phrase':
q = '%s "%s"' % (q, v.strip('"'))
elif k == 'at_least_one':
q = '%s %s' % (q, ' '.join('OR "%s"' % x for x in split(v)))
elif k == 'without_words':
q = '%s %s' % (q, ' '.join('-"%s"' % x for x in split(v)))
elif k in ('author','title', 'publisher'):
q = '%s %s' % (q, ' '.join('in%s:"%s"'%(k,x) for x in split(v)))
elif k == 'subject':
q = '%s %s' % (q, ' '.join('%s:"%s"' % (k,x) for x in split(v)))
elif k == 'isbn':
q = '%s ISBN%s' % (q, v)
elif k == 'issn':
q = '%s ISSN%s' % (q,v)
elif k == 'oclc':
q = '%s OCLC%s' % (q,v)
else:
raise ValueError("Unsupported search keyword")
return self.search(q.strip(),start_index=start_index, feed=feed,
max_results=max_results,
min_viewability=min_viewability)
def search_library(self, q, id='me', **kwargs):
"""Like search, but in a library feed. Default is the authenticated
user's feed. Change by setting id."""
if 'feed' in kwargs:
raise ValueError("kwarg 'feed' conflicts with library_id")
feed = LIBRARY_FEED % id
return self.search(q, feed=feed, **kwargs)
def search_library_by_keyword(self, id='me', **kwargs):
"""Hybrid of search_by_keyword and search_library
"""
if 'feed' in kwargs:
raise ValueError("kwarg 'feed' conflicts with library_id")
feed = LIBRARY_FEED % id
return self.search_by_keyword(feed=feed,**kwargs)
def search_annotations(self, q, id='me', **kwargs):
"""Like search, but in an annotation feed. Default is the authenticated
user's feed. Change by setting id."""
if 'feed' in kwargs:
raise ValueError("kwarg 'feed' conflicts with library_id")
feed = ANNOTATION_FEED % id
return self.search(q, feed=feed, **kwargs)
def search_annotations_by_keyword(self, id='me', **kwargs):
"""Hybrid of search_by_keyword and search_annotations
"""
if 'feed' in kwargs:
raise ValueError("kwarg 'feed' conflicts with library_id")
feed = ANNOTATION_FEED % id
return self.search_by_keyword(feed=feed,**kwargs)
def add_item_to_library(self, item):
"""Add the item, either an XML string or books.Book instance, to the
user's library feed"""
feed = LIBRARY_FEED % 'me'
return self.Post(data=item, uri=feed, converter=books.Book.FromString)
def remove_item_from_library(self, item):
"""
Remove the item, a books.Book instance, from the authenticated user's
library feed. Using an item retrieved from a public search will fail.
"""
return self.Delete(item.GetEditLink().href)
def add_annotation(self, item):
"""
Add the item, either an XML string or books.Book instance, to the
user's annotation feed.
"""
# do not use GetAnnotationLink, results in 400 Bad URI due to www
return self.Post(data=item, uri=ANNOTATION_FEED % 'me',
converter=books.Book.FromString)
def edit_annotation(self, item):
"""
Send an edited item, a books.Book instance, to the user's annotation
feed. Note that whereas extra annotations in add_annotations, minus
ratings which are immutable once set, are simply added to the item in
the annotation feed, if an annotation has been removed from the item,
sending an edit request will remove that annotation. This should not
happen with add_annotation.
"""
return self.Put(data=item, uri=item.GetEditLink().href,
converter=books.Book.FromString)
def get_by_google_id(self, id):
return self.Get(ITEM_FEED + id, converter=books.Book.FromString)
def get_library(self, id='me',feed=LIBRARY_FEED, start_index="1",
max_results="100", min_viewability="none",
converter=books.BookFeed.FromString):
"""
Return a generator object that will yield books.Book instances until
the search feed no longer returns an item from the GetNextLink method.
Thus max_results is not the maximum number of items that will be
returned, but rather the number of items per page of searches. This has
been set high to reduce the required number of network requests.
"""
q = gdata.service.Query()
q.feed = feed % id
q['start-index'] = start_index
q['max-results'] = max_results
q['min-viewability'] = min_viewability
x = self.Get(uri=q.ToUri(), converter=converter)
while 1:
for entry in x.entry:
yield entry
# The for loop has no break, so always fall through to pagination.
l = x.GetNextLink()
if l: # hope the server preserves our preferences
x = self.Get(uri=l.href, converter=converter)
else:
break
def get_annotations(self, id='me', start_index="1", max_results="100",
min_viewability="none", converter=books.BookFeed.FromString):
"""
Like get_library, but for the annotation feed
"""
return self.get_library(id=id, feed=ANNOTATION_FEED,
max_results=max_results, min_viewability = min_viewability,
converter=converter)
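# Hedged usage sketch (added for illustration; not part of gdata, and the
# source string below is hypothetical). search_by_keyword composes the q
# string per its docstring:
#
#   service = BookService(source='ExampleCo - Demo - 1.0')
#   feed = service.search_by_keyword(exact_phrase='design patterns',
#                                    author='gamma')
#   # q expands to roughly: '"design patterns" inauthor:"gamma"'
#   for book in feed.entry:
#       print book.title.text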
| apache-2.0 |
sekikn/incubator-airflow | airflow/providers/amazon/aws/example_dags/example_datasync_2.py | 10 | 3774 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example dag for using `AWSDataSyncOperator` in a more complex manner.
- Try to get a TaskArn. If one exists, update it.
- If no tasks exist, try to create a new DataSync Task.
- If source and destination locations don't exist for the new task, create them first
- If many tasks exist, raise an Exception
- After getting or creating a DataSync Task, run it
This DAG relies on the following environment variables:
* SOURCE_LOCATION_URI - Source location URI, usually on premises SMB or NFS
* DESTINATION_LOCATION_URI - Destination location URI, usually S3
* CREATE_TASK_KWARGS - Passed to boto3.create_task(**kwargs)
* CREATE_SOURCE_LOCATION_KWARGS - Passed to boto3.create_location(**kwargs)
* CREATE_DESTINATION_LOCATION_KWARGS - Passed to boto3.create_location(**kwargs)
* UPDATE_TASK_KWARGS - Passed to boto3.update_task(**kwargs)
"""
import json
import re
from os import getenv
from airflow import models
from airflow.providers.amazon.aws.operators.datasync import AWSDataSyncOperator
from airflow.utils.dates import days_ago
# [START howto_operator_datasync_2_args]
SOURCE_LOCATION_URI = getenv("SOURCE_LOCATION_URI", "smb://hostname/directory/")
DESTINATION_LOCATION_URI = getenv("DESTINATION_LOCATION_URI", "s3://mybucket/prefix")
default_create_task_kwargs = '{"Name": "Created by Airflow"}'
CREATE_TASK_KWARGS = json.loads(getenv("CREATE_TASK_KWARGS", default_create_task_kwargs))
default_create_source_location_kwargs = "{}"
CREATE_SOURCE_LOCATION_KWARGS = json.loads(
getenv("CREATE_SOURCE_LOCATION_KWARGS", default_create_source_location_kwargs)
)
bucket_access_role_arn = "arn:aws:iam::11112223344:role/r-11112223344-my-bucket-access-role"
default_destination_location_kwargs = """\
{"S3BucketArn": "arn:aws:s3:::mybucket",
"S3Config": {"BucketAccessRoleArn":
"arn:aws:iam::11112223344:role/r-11112223344-my-bucket-access-role"}
}"""
CREATE_DESTINATION_LOCATION_KWARGS = json.loads(
getenv("CREATE_DESTINATION_LOCATION_KWARGS", re.sub(r"[\s+]", '', default_destination_location_kwargs))
)
default_update_task_kwargs = '{"Name": "Updated by Airflow"}'
UPDATE_TASK_KWARGS = json.loads(getenv("UPDATE_TASK_KWARGS", default_update_task_kwargs))
# [END howto_operator_datasync_2_args]
with models.DAG(
"example_datasync_2",
schedule_interval=None, # Override to match your needs
start_date=days_ago(1),
tags=['example'],
) as dag:
# [START howto_operator_datasync_2]
datasync_task = AWSDataSyncOperator(
aws_conn_id="aws_default",
task_id="datasync_task",
source_location_uri=SOURCE_LOCATION_URI,
destination_location_uri=DESTINATION_LOCATION_URI,
create_task_kwargs=CREATE_TASK_KWARGS,
create_source_location_kwargs=CREATE_SOURCE_LOCATION_KWARGS,
create_destination_location_kwargs=CREATE_DESTINATION_LOCATION_KWARGS,
update_task_kwargs=UPDATE_TASK_KWARGS,
delete_task_after_execution=True,
)
# [END howto_operator_datasync_2]
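# Hedged note (added for illustration; values are hypothetical): the *_KWARGS
# environment variables must contain JSON, since they are json.loads()'d above:
#
#   export CREATE_TASK_KWARGS='{"Name": "Nightly sync"}'
#   export UPDATE_TASK_KWARGS='{"Name": "Nightly sync v2"}'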
| apache-2.0 |
yyt030/pyzmq | zmq/tests/test_ioloop.py | 27 | 3521 | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import time
import os
import threading
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.eventloop import ioloop
from zmq.eventloop.minitornado.ioloop import _Timeout
try:
from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop
except ImportError:
from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop
def printer():
os.system("say hello")
raise Exception
print (time.time())
class Delay(threading.Thread):
def __init__(self, f, delay=1):
self.f = f
self.delay = delay
self.aborted = False
self.cond = threading.Condition()
def run(self):
self.cond.acquire()
self.cond.wait(self.delay)
self.cond.release()
if not self.aborted:
self.f()
def abort(self):
self.aborted=True
self.cond.acquire()
self.cond.notify()
self.cond.release()
class TestIOLoop(BaseZMQTestCase):
def test_simple(self):
"""simple IOLoop creation test"""
loop = ioloop.IOLoop()
dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
pc.start()
dc.start()
t = Delay(loop.stop,1)
t.start()
loop.start()
if t.isAlive():
t.abort()
else:
self.fail("IOLoop failed to exit")
def test_timeout_compare(self):
"""test timeout comparisons"""
loop = ioloop.IOLoop()
t = _Timeout(1, 2, loop)
t2 = _Timeout(1, 3, loop)
self.assertEqual(t < t2, id(t) < id(t2))
t2 = _Timeout(2,1, loop)
self.assertTrue(t < t2)
def test_poller_events(self):
"""Tornado poller implementation maps events correctly"""
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
poller = ioloop.ZMQPoller()
poller.register(req, ioloop.IOLoop.READ)
poller.register(rep, ioloop.IOLoop.READ)
events = dict(poller.poll(0))
self.assertEqual(events.get(rep), None)
self.assertEqual(events.get(req), None)
poller.register(req, ioloop.IOLoop.WRITE)
poller.register(rep, ioloop.IOLoop.WRITE)
events = dict(poller.poll(1))
self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
self.assertEqual(events.get(rep), None)
poller.register(rep, ioloop.IOLoop.READ)
req.send(b'hi')
events = dict(poller.poll(1))
self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
self.assertEqual(events.get(req), None)
def test_instance(self):
"""Test IOLoop.instance returns the right object"""
loop = ioloop.IOLoop.instance()
self.assertEqual(loop.__class__, ioloop.IOLoop)
loop = BaseIOLoop.instance()
self.assertEqual(loop.__class__, ioloop.IOLoop)
def test_close_all(self):
"""Test close(all_fds=True)"""
loop = ioloop.IOLoop.instance()
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
self.assertEqual(req.closed, False)
self.assertEqual(rep.closed, False)
loop.close(all_fds=True)
self.assertEqual(req.closed, True)
self.assertEqual(rep.closed, True)
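# Hedged sketch (added for illustration; not part of pyzmq): the minimal
# stop-from-a-callback pattern that test_simple above exercises.
def _minimal_loop_demo():
    loop = ioloop.IOLoop()
    ioloop.PeriodicCallback(loop.stop, 50, loop).start()
    loop.start()  # returns once the callback fires and stops the loop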
| bsd-3-clause |
lepture/pythondotorg | jobs/urls.py | 8 | 1590 | from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
from . import feeds
urlpatterns = [
url(r'^$', views.JobList.as_view(), name='job_list'),
url(r'^feed/rss/$', feeds.JobFeed(), name='job_rss'),
url(r'^create/$', views.JobCreate.as_view(), name='job_create'),
url(r'^mine/$', views.JobListMine.as_view(), name='job_list_mine'),
url(r'^review/$', views.JobReview.as_view(), name='job_review'),
url(r'^thanks/$', TemplateView.as_view(template_name="jobs/job_thanks.html"), name='job_thanks'),
url(r'^location/telecommute/$', views.JobTelecommute.as_view(), name='job_telecommute'),
url(r'^location/(?P<slug>[-_\w]+)/$', views.JobListLocation.as_view(), name='job_list_location'),
url(r'^type/(?P<slug>[-_\w]+)/$', views.JobListType.as_view(), name='job_list_type'),
url(r'^category/(?P<slug>[-_\w]+)/$', views.JobListCategory.as_view(), name='job_list_category'),
url(r'^locations/$', views.JobLocations.as_view(), name='job_locations'),
url(r'^types/$', views.JobTypes.as_view(), name='job_types'),
url(r'^categories/$', views.JobCategories.as_view(), name='job_categories'),
url(r'^(?P<pk>\d+)/archive/$', views.JobArchive.as_view(), name='job_archive'),
url(r'^(?P<pk>\d+)/edit/$', views.JobEdit.as_view(), name='job_edit'),
url(r'^(?P<pk>\d+)/publish/$', views.JobPublish.as_view(), name='job_publish'),
url(r'^(?P<pk>\d+)/review/$', views.JobDetailReview.as_view(), name='job_detail_review'),
url(r'^(?P<pk>\d+)/$', views.JobDetail.as_view(), name='job_detail'),
]
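# Hedged illustration (added; URL names taken from the patterns above,
# mount point assumed to be /jobs/): reversing works as usual, e.g.
#   reverse('job_detail', kwargs={'pk': 42})  -> '/jobs/42/'
#   reverse('job_list_type', args=['contract'])  # hypothetical slug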
| apache-2.0 |
meteorfox/PerfKitBenchmarker | tests/integration/gcp_disk_integration_test.py | 5 | 3009 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for GCE scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class GcpScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for GCE disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPDStandard(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'n1-standard-2',
'zone': 'us-central1-a'
}
},
'disk_spec': {
'GCP': {
'disk_type': 'pd-standard',
'disk_size': 2,
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testPDSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'n1-standard-2',
'zone': 'us-central1-a'
}
},
'disk_spec': {
'GCP': {
'disk_type': 'pd-ssd',
'disk_size': 2,
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'GCP',
'vm_spec': {
'GCP': {
'machine_type': 'n1-standard-2',
'zone': 'us-central1-a',
'num_local_ssds': 1
}
},
'disk_spec': {
'GCP': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
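# Hedged note (added for illustration): these tests provision real GCE
# resources, so they are gated on the PERFKIT_INTEGRATION variable checked
# above, e.g. (module path assumed from this file's location):
#   PERFKIT_INTEGRATION=1 python -m unittest tests.integration.gcp_disk_integration_test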
| apache-2.0 |
neogeographica/quakesounds | bundled_modules/pkg_resources.py | 15 | 101430 | """Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
def exec_(code, globs=None, locs=None):
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
except NameError:
basestring = str
from io import BytesIO
exec_ = eval("exec")
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec_(compile(open(fn).read(), fn, 'exec'), globs, locs)
import functools
reduce = functools.reduce
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
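# Hedged illustration (added): _declare_state() ties a module global to one
# of the _sget_/_sset_ strategy pairs above, so that, e.g., a hypothetical
#   _declare_state('dict', _registry={})
# makes __getstate__() snapshot _registry via _sget_dict and lets
# __setstate__() restore it in place via _sset_dict.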
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
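# Hedged examples (added for illustration) of the Mac OS X rules above:
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')  # -> True
#   compatible_platforms('macosx-10.4-ppc', 'macosx-10.3-ppc')  # -> False
#   compatible_platforms(None, 'linux-x86_64')                  # -> True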
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
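# Hedged usage sketch (added for illustration; 'mypkg' and the entry point
# are hypothetical): the helpers above narrow from a full map to one object.
#
#   get_entry_map('mypkg')                                # {group: {name: EntryPoint}}
#   get_entry_info('mypkg', 'console_scripts', 'run')     # EntryPoint or None
#   load_entry_point('mypkg', 'console_scripts', 'run')() # import and invoke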
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None:
if env is None:
env = Environment(self.entries)
dist = best[req.key] = env.best_match(req, self, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
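    # Usage sketch, assuming a hypothetical "MyApp" and its dependencies are
    # installed; resolve() returns distributions in activation order, or
    # raises DistributionNotFound / VersionConflict:
    #
    #   reqs = parse_requirements("MyApp>=1.0")
    #   for dist in working_set.resolve(reqs):
    #       print('%s %s' % (dist.project_name, dist.version))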
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
            list(map(working_set.add, distributions)) # add plugins+libs to sys.path
            print('Could not load', errors) # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
list(map(shadow_set.add, self)) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
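    # Usage sketch ("SQLObject" is just an illustrative requirement string):
    #
    #   working_set.require('SQLObject>=0.5')
    #   # the matching eggs are now on sys.path and importable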
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'2.4'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
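    # Lookup-order sketch (the project name is hypothetical): an already-active
    # dist wins, then the newest compatible dist in this environment, and
    # finally obtain() -- which returns None here when no `installer` is given.
    #
    #   env = Environment()   # scans sys.path by default
    #   dist = env.best_match(Requirement.parse("example-project"), working_set)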
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
        In the base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # add a+rx (0o555), mask type bits (0o7777)
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
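# Worked example of the lookup above (the cache path is hypothetical):
#
#   os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'
#   get_default_cache()   # -> '/tmp/egg-cache'
#
# With the variable unset, POSIX systems get '~/.python-eggs' (expanded), and
# Windows falls back to the first match in `app_homes`, e.g.
# os.path.join(os.environ['APPDATA'], 'Python-Eggs').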
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
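# Illustrative inputs/outputs for the escaping helpers above (the values
# follow directly from the regular expressions; the names are made up):
#
#   safe_name('The $$$ Project')     # -> 'The-Project'
#   safe_version('1.0 beta 2')       # -> '1.0.beta.2'
#   safe_extra('Foo Bar')            # -> 'foo_bar'
#   to_filename('python-dateutil')   # -> 'python_dateutil'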
_marker_names = {
'os': ['name'], 'sys': ['platform'],
'platform': ['version','machine','python_implementation'],
'python_version': [], 'python_full_version': [], 'extra':[],
}
_marker_values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': lambda: sys.version.split()[0],
'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
'platform_version': lambda: _platinfo('version'),
'platform_machine': lambda: _platinfo('machine'),
'python_implementation': lambda: _platinfo('python_implementation') or _pyimp(),
}
def _platinfo(attr):
try:
import platform
except ImportError:
return ''
return getattr(platform, attr, lambda:'')()
def _pyimp():
if sys.platform=='cli':
return 'IronPython'
elif sys.platform.startswith('java'):
return 'Jython'
elif '__pypy__' in sys.builtin_module_names:
return 'PyPy'
else:
return 'CPython'
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
def invalid_marker(text):
"""Validate text as a PEP 426 environment marker; return exception or False"""
try:
evaluate_marker(text)
except SyntaxError:
return normalize_exception(sys.exc_info()[1])
return False
def evaluate_marker(text, extra=None, _ops={}):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
if not _ops:
from token import NAME, STRING
import token
import symbol
import operator
def and_test(nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return reduce(operator.and_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
def test(nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return reduce(operator.or_, [interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
def atom(nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return interpret(nodelist[2])
raise SyntaxError("Language feature not supported in environment markers")
def comparison(nodelist):
if len(nodelist)>4:
raise SyntaxError("Chained comparison not allowed in environment markers")
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = _ops[cop]
except KeyError:
raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
return cop(evaluate(nodelist[1]), evaluate(nodelist[3]))
_ops.update({
symbol.test: test, symbol.and_test: and_test, symbol.atom: atom,
symbol.comparison: comparison, 'not in': lambda x,y: x not in y,
'in': lambda x,y: x in y, '==': operator.eq, '!=': operator.ne,
})
if hasattr(symbol,'or_test'):
_ops[symbol.or_test] = test
def interpret(nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = _ops[nodelist[0]]
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)
def evaluate(nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
#while len(name)==2: name = name[1]
if kind==NAME:
try:
op = _marker_values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==STRING:
s = nodelist[1]
if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
or '\\' in s:
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
raise SyntaxError("Language feature not supported in environment markers")
return interpret(parser.expr(text).totuple(1)[1])
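# Usage sketch: only ==, !=, 'in' and 'not in' are registered in _ops above,
# so markers are restricted to those comparison operators here.
#
#   evaluate_marker("os_name == 'posix'")       # -> True on POSIX systems
#   evaluate_marker("python_version == '2.7'")  # bool for this interpreter
#   invalid_marker("python_version ==")         # -> a SyntaxError instance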
def _markerlib_evaluate(text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# fallback to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec_(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
def build_zipmanifest(path):
"""
This builds a similar dictionary to the zipimport directory
    caches. However, instead of tuples, ZipInfo objects are stored.
The translation of the tuple is as follows:
    * [0] - zipinfo.filename; on stock Pythons this needs "/" --> os.sep,
            on PyPy it is the same (one reason why distribute did work
            in some cases on PyPy and win32).
* [1] - zipinfo.compress_type
* [2] - zipinfo.compress_size
* [3] - zipinfo.file_size
    * [4] - len(utf-8 encoding of filename) if zipinfo.flag_bits & 0x800
len(ascii encoding of filename) otherwise
* [5] - (zipinfo.date_time[0] - 1980) << 9 |
zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
* [6] - (zipinfo.date_time[3] - 1980) << 11 |
zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
* [7] - zipinfo.CRC
"""
zipinfo = dict()
zfile = zipfile.ZipFile(path)
    # ZipFile has no __exit__ on Python 3.1, so use try/finally instead of "with"
try:
for zitem in zfile.namelist():
zpath = zitem.replace('/', os.sep)
zipinfo[zpath] = zfile.getinfo(zitem)
assert zipinfo[zpath] is not None
finally:
zfile.close()
return zipinfo
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = build_zipmanifest(self.loader.archive)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
        date_time = zip_stat.date_time + (0, 0, -1) # ymdhms + (wday, yday, dst) for mktime
        # (zipfile already applies the DOS 1980 epoch offset)
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
f = open(file_path, 'rb')
file_contents = f.read()
f.close()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = build_zipmanifest(importer.archive)
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
class ImpWrapper:
"""PEP 302 Importer that wraps Python's "normal" import algorithm"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [self.path]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(file, filename, etc)
class ImpLoader:
"""PEP 302 Loader that wraps Python's "normal" import algorithm"""
def __init__(self, file, filename, etc):
self.file = file
self.filename = filename
self.etc = etc
def load_module(self, fullname):
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file: self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_importer(path_item):
"""Retrieve a PEP 302 "importer" for the given path item
If there is no importer, this returns a wrapper around the builtin import
machinery. The returned importer is only cached if it was created by a
path hook.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for hook in sys.path_hooks:
try:
importer = hook(path_item)
except ImportError:
pass
else:
break
else:
importer = None
sys.path_importer_cache.setdefault(path_item,importer)
if importer is None:
try:
importer = ImpWrapper(path_item)
except ImportError:
pass
return importer
try:
from pkgutil import get_importer, ImpImporter
except ImportError:
pass # Python 2.3 or 2.4, use our own implementation
else:
ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
del ImpLoader, ImpImporter
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
entry_file = open(os.path.join(path_item, entry))
try:
entry_lines = entry_file.readlines()
finally:
entry_file.close()
for line in entry_lines:
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(ImpWrapper,find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []; _set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer,path_item,packageName,module)
if subpath is not None:
path = module.__path__; path.append(subpath)
loader.load_module(packageName); module.__path__ = path
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(ImpWrapper,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
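# Worked example: blank lines and comment lines are dropped, and nested
# sequences are flattened recursively.
#
#   list(yield_lines("foo\n# comment\n\nbar"))   # -> ['foo', 'bar']
#   list(yield_lines(["a\nb", ["c"]]))           # -> ['a', 'b', 'c']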
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represent a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them, and "dev" is replaced with an '@' so that it sorts lower than
    any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
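# Ordering examples implied by the rules above (keys are tuples of strings):
#
#   parse_version('2.4') == parse_version('2.4.0')   # trailing zeros dropped
#   parse_version('2.4a1') < parse_version('2.4')    # pre-release sorts first
#   parse_version('2.4') < parse_version('2.4-1') < parse_version('2.4.1')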
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
list(map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer)))
#@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional.
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
parse = classmethod(parse)
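    # Parse example (all names hypothetical):
    #
    #   ep = EntryPoint.parse("main = mypkg.cli:run_main [extra1,extra2]")
    #   ep.name, ep.module_name, ep.attrs  # 'main', 'mypkg.cli', ('run_main',)
    #   str(ep)   # round-trips: "main = mypkg.cli:run_main [extra1,extra2]"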
#@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
parse_group = classmethod(parse_group)
#@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
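# Worked example: only "#md5=..." fragments are stripped; anything else is
# left untouched.
#
#   _remove_md5_fragment('http://x/dist-1.0.egg#md5=d41d8cd9')
#   # -> 'http://x/dist-1.0.egg'
#   _remove_md5_fragment('http://x/dist-1.0.egg#egg=dist')  # unchanged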
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
#@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
            # .dist-info distributions expose much of their metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
from_location = classmethod(from_location)
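    # Filename-parsing sketch (path and egg name are hypothetical): EGG_NAME
    # splits the basename into name/version/python/platform fields.
    #
    #   d = Distribution.from_location('/plugins', 'FooBar-1.2-py2.7.egg')
    #   d.project_name, d.version, d.py_version  # 'FooBar', '1.2', '2.7'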
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
#@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
key = property(key)
#@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
parsed_version = property(parsed_version)
#@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
version = property(version)
#@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':',1)
if invalid_marker(marker):
reqs=[] # XXX warn
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
_dep_map = property(_dep_map)
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
#@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
from_filename = classmethod(from_filename)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
bp = None
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='setuptools':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
#@property
def extras(self):
return [dep for dep in self._dep_map if dep]
extras = property(extras)
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
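    # sort ascending by hashcmp, then write back highest-precedence first;
    # the reversed slice assignment keeps the original list object intact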
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key != self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F':
return False
elif action=='T':
return True
elif action=='+':
last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
#@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs)==1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
parse = staticmethod(parse)
state_machine = {
# =><
'<': '--T',
'<=': 'T-T',
'>': 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
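# How the table reads (see Requirement.__contains__): each value is a
# three-character transition string indexed by cmp(candidate, spec version)
# -- position 0 for equal, 1 for greater, -1 for less, per the "=><" legend.
# 'T'/'F' accept or reject immediately, '+'/'-' record a provisional result,
# and '.' makes no decision (counting as a miss only when nothing has been
# decided yet).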
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
| gpl-3.0 |
mafiya69/sympy | sympy/combinatorics/subsets.py | 86 | 15802 | from __future__ import print_function, division
from itertools import combinations
from sympy.core import Basic
from sympy.combinatorics.graycode import GrayCode
from sympy.core.compatibility import range
class Subset(Basic):
"""
Represents a basic subset object.
We generate subsets using essentially two techniques,
binary enumeration and lexicographic enumeration.
The Subset class takes two arguments, the first one
describes the initial subset to consider and the second
describes the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a.prev_binary().subset
['c']
"""
_rank_binary = None
_rank_lex = None
_rank_graycode = None
_subset = None
_superset = None
def __new__(cls, subset, superset):
"""
Default constructor.
It takes the subset and its superset as its parameters.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
>>> a.superset
['a', 'b', 'c', 'd']
>>> a.size
2
"""
if len(subset) > len(superset):
            raise ValueError('Invalid arguments have been provided. The superset must be at least as large as the subset.')
for elem in subset:
if elem not in superset:
                raise ValueError('The superset provided is invalid as it does not contain the element %s' % elem)
obj = Basic.__new__(cls)
obj._subset = subset
obj._superset = superset
return obj
def iterate_binary(self, k):
"""
        This is a helper function. It iterates over the
        binary subsets by k steps. The step count k can be
        positive or negative.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(-2).subset
['d']
>>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd'])
>>> a.iterate_binary(2).subset
[]
See Also
========
next_binary, prev_binary
"""
bin_list = Subset.bitlist_from_subset(self.subset, self.superset)
n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size
bits = bin(n)[2:].rjust(self.superset_size, '0')
return Subset.subset_from_bitlist(self.superset, bits)
def next_binary(self):
"""
Generates the next binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
['b']
>>> a = Subset(['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_binary().subset
[]
See Also
========
prev_binary, iterate_binary
"""
return self.iterate_binary(1)
def prev_binary(self):
"""
Generates the previous binary ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['a', 'b', 'c', 'd']
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.prev_binary().subset
['c']
See Also
========
next_binary, iterate_binary
"""
return self.iterate_binary(-1)
def next_lexicographic(self):
"""
Generates the next lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
['d']
>>> a = Subset(['d'], ['a', 'b', 'c', 'd'])
>>> a.next_lexicographic().subset
[]
See Also
========
prev_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
if i in indices:
if i - 1 in indices:
indices.remove(i - 1)
else:
indices.remove(i)
i = i - 1
                while i not in indices and i >= 0:
i = i - 1
if i >= 0:
indices.remove(i)
indices.append(i+1)
else:
while i not in indices and i >= 0:
i = i - 1
indices.append(i + 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def prev_lexicographic(self):
"""
Generates the previous lexicographically ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['d']
>>> a = Subset(['c','d'], ['a', 'b', 'c', 'd'])
>>> a.prev_lexicographic().subset
['c']
See Also
========
next_lexicographic
"""
i = self.superset_size - 1
indices = Subset.subset_indices(self.subset, self.superset)
while i not in indices and i >= 0:
i = i - 1
if i - 1 in indices or i == 0:
indices.remove(i)
else:
if i >= 0:
indices.remove(i)
indices.append(i - 1)
indices.append(self.superset_size - 1)
ret_set = []
super_set = self.superset
for i in indices:
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
def iterate_graycode(self, k):
"""
Helper function used for prev_gray and next_gray.
        It advances k steps in Gray code order to get the respective subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.iterate_graycode(3).subset
[1, 4]
>>> a.iterate_graycode(-2).subset
[1, 2, 4]
See Also
========
next_gray, prev_gray
"""
unranked_code = GrayCode.unrank(self.superset_size,
(self.rank_gray + k) % self.cardinality)
return Subset.subset_from_bitlist(self.superset,
unranked_code)
def next_gray(self):
"""
Generates the next Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([1, 2, 3], [1, 2, 3, 4])
>>> a.next_gray().subset
[1, 3]
See Also
========
iterate_graycode, prev_gray
"""
return self.iterate_graycode(1)
def prev_gray(self):
"""
Generates the previous Gray code ordered subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([2, 3, 4], [1, 2, 3, 4, 5])
>>> a.prev_gray().subset
[2, 3, 4, 5]
See Also
========
iterate_graycode, next_gray
"""
return self.iterate_graycode(-1)
@property
def rank_binary(self):
"""
Computes the binary ordered rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset([], ['a','b','c','d'])
>>> a.rank_binary
0
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_binary
3
See Also
========
iterate_binary, unrank_binary
"""
if self._rank_binary is None:
self._rank_binary = int("".join(
Subset.bitlist_from_subset(self.subset,
self.superset)), 2)
return self._rank_binary
@property
def rank_lexicographic(self):
"""
Computes the lexicographic ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.rank_lexicographic
14
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_lexicographic
43
"""
if self._rank_lex is None:
def _ranklex(self, subset_index, i, n):
if subset_index == [] or i > n:
return 0
if i in subset_index:
subset_index.remove(i)
return 1 + _ranklex(self, subset_index, i + 1, n)
return 2**(n - i - 1) + _ranklex(self, subset_index, i + 1, n)
indices = Subset.subset_indices(self.subset, self.superset)
self._rank_lex = _ranklex(self, indices, 0, self.superset_size)
return self._rank_lex
@property
def rank_gray(self):
"""
Computes the Gray code ranking of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c','d'], ['a','b','c','d'])
>>> a.rank_gray
2
>>> a = Subset([2, 4, 5], [1, 2, 3, 4, 5, 6])
>>> a.rank_gray
27
See Also
========
iterate_graycode, unrank_gray
"""
if self._rank_graycode is None:
bits = Subset.bitlist_from_subset(self.subset, self.superset)
self._rank_graycode = GrayCode(len(bits), start=bits).rank
return self._rank_graycode
@property
def subset(self):
"""
Gets the subset represented by the current instance.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.subset
['c', 'd']
See Also
========
superset, size, superset_size, cardinality
"""
return self._subset
@property
def size(self):
"""
Gets the size of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.size
2
See Also
========
subset, superset, superset_size, cardinality
"""
return len(self.subset)
@property
def superset(self):
"""
Gets the superset of the subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset
['a', 'b', 'c', 'd']
See Also
========
subset, size, superset_size, cardinality
"""
return self._superset
@property
def superset_size(self):
"""
Returns the size of the superset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.superset_size
4
See Also
========
subset, superset, size, cardinality
"""
return len(self.superset)
@property
def cardinality(self):
"""
Returns the number of all possible subsets.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
>>> a.cardinality
16
See Also
========
subset, superset, size, superset_size
"""
return 2**(self.superset_size)
@classmethod
def subset_from_bitlist(self, super_set, bitlist):
"""
Gets the subset defined by the bitlist.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.subset_from_bitlist(['a', 'b', 'c', 'd'], '0011').subset
['c', 'd']
See Also
========
bitlist_from_subset
"""
if len(super_set) != len(bitlist):
raise ValueError("The sizes of the lists are not equal")
ret_set = []
for i in range(len(bitlist)):
if bitlist[i] == '1':
ret_set.append(super_set[i])
return Subset(ret_set, super_set)
@classmethod
def bitlist_from_subset(self, subset, superset):
"""
Gets the bitlist corresponding to a subset.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.bitlist_from_subset(['c', 'd'], ['a', 'b', 'c', 'd'])
'0011'
See Also
========
subset_from_bitlist
"""
bitlist = ['0'] * len(superset)
if type(subset) is Subset:
subset = subset.args[0]
for i in Subset.subset_indices(subset, superset):
bitlist[i] = '1'
return ''.join(bitlist)
@classmethod
def unrank_binary(self, rank, superset):
"""
Gets the binary ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset
['b']
See Also
========
iterate_binary, rank_binary
"""
bits = bin(rank)[2:].rjust(len(superset), '0')
return Subset.subset_from_bitlist(superset, bits)
@classmethod
def unrank_gray(self, rank, superset):
"""
Gets the Gray code ordered subset of the specified rank.
Examples
========
>>> from sympy.combinatorics.subsets import Subset
>>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset
['a', 'b']
>>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset
[]
See Also
========
iterate_graycode, rank_gray
"""
graycode_bitlist = GrayCode.unrank(len(superset), rank)
return Subset.subset_from_bitlist(superset, graycode_bitlist)
@classmethod
def subset_indices(self, subset, superset):
"""Return indices of subset in superset in a list; the list is empty
if all elements of subset are not in superset.
Examples
========
>>> from sympy.combinatorics import Subset
>>> superset = [1, 3, 2, 5, 4]
>>> Subset.subset_indices([3, 2, 1], superset)
[1, 2, 0]
>>> Subset.subset_indices([1, 6], superset)
[]
>>> Subset.subset_indices([], superset)
[]
"""
a, b = superset, subset
sb = set(b)
d = {}
for i, ai in enumerate(a):
if ai in sb:
d[ai] = i
sb.remove(ai)
if not sb:
break
else:
return list()
return [d[bi] for bi in b]
def ksubsets(superset, k):
"""
Finds the subsets of size k in lexicographic order.
This uses the itertools generator.
Examples
========
>>> from sympy.combinatorics.subsets import ksubsets
>>> list(ksubsets([1, 2, 3], 2))
[(1, 2), (1, 3), (2, 3)]
>>> list(ksubsets([1, 2, 3, 4, 5], 2))
[(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), \
(2, 5), (3, 4), (3, 5), (4, 5)]
See Also
========
class:Subset
"""
return combinations(superset, k)
| bsd-3-clause |
Lilykos/invenio | invenio/celery/testsuite/helpers.py | 19 | 1769 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
"""
Celery unit tests helper
"""
from __future__ import absolute_import
from celery import Celery
from invenio.testsuite import InvenioTestCase
from invenio.celery import InvenioLoader
class CeleryTestCase(InvenioTestCase):
def create_celery_app(self):
# Execute tasks synchronously
self.app.config['CELERY_ALWAYS_EAGER'] = True
# Set in-memory result backend
self.app.config['CELERY_RESULT_BACKEND'] = 'cache'
self.app.config['CELERY_CACHE_BACKEND'] = 'memory'
# Don't silence exceptions in tasks.
self.app.config['CELERY_EAGER_PROPAGATES_EXCEPTIONS'] = True
self.celery_app = Celery(
'invenio-test',
loader=InvenioLoader,
flask_app=self.app,
)
self.celery_app.loader.import_default_modules()
def destroy_celery_app(self):
del self.celery_app
def setUp(self):
self.create_celery_app()
def tearDown(self):
self.destroy_celery_app()
| gpl-2.0 |
agry/NGECore2 | scripts/mobiles/dathomir/shear_mite_broodling.py | 2 | 1557 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('shear_mite_broodling')
mobileTemplate.setLevel(69)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Insect Meat")
mobileTemplate.setMeatAmount(15)
mobileTemplate.setHideType("Scaley Hide")
mobileTemplate.setHideAmount(3)
mobileTemplate.setSocialGroup("shearmite")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_shear_mite_broodling.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bolster_armor_3')
attacks.add('bm_enfeeble_3')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('shear_mite_broodling', mobileTemplate)
	return
| lgpl-3.0 |
li-xinyang/FSND_P2_TournamentResults | tournament/tournament.py | 1 | 2214 | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament")
def deleteMatches():
"""Remove all the match records from the database."""
def deletePlayers():
"""Remove all the player records from the database."""
def countPlayers():
"""Returns the number of players currently registered."""
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
| mit |
avanov/django | tests/extra_regress/models.py | 166 | 1368 | from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
base = models.ForeignKey('self', null=True)
title = models.CharField(blank=True, max_length=255)
when = models.DateTimeField(default=datetime.datetime.now)
def __str__(self):
return "%s (%s, %s)" % (self.title, self.id, self.base.id)
def save(self, *args, **kwargs):
super(RevisionableModel, self).save(*args, **kwargs)
if not self.base:
self.base = self
kwargs.pop('force_insert', None)
kwargs.pop('force_update', None)
super(RevisionableModel, self).save(*args, **kwargs)
def new_revision(self):
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
class Order(models.Model):
created_by = models.ForeignKey(User)
text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
first = models.CharField(max_length=20)
second = models.CharField(max_length=20)
third = models.CharField(max_length=20)
def __str__(self):
return 'TestObject: %s,%s,%s' % (self.first, self.second, self.third)
| bsd-3-clause |
big-pegasus/spark | examples/src/main/python/mllib/logistic_regression.py | 85 | 1826 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Logistic regression using MLlib.
This example requires NumPy (http://www.numpy.org/).
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithSGD
def parsePoint(line):
"""
Parse a line of text into an MLlib LabeledPoint object.
"""
values = [float(s) for s in line.split(' ')]
if values[0] == -1: # Convert -1 labels to 0 for MLlib
values[0] = 0
return LabeledPoint(values[0], values[1:])
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: logistic_regression <file> <iterations>", file=sys.stderr)
        sys.exit(-1)
sc = SparkContext(appName="PythonLR")
points = sc.textFile(sys.argv[1]).map(parsePoint)
iterations = int(sys.argv[2])
model = LogisticRegressionWithSGD.train(points, iterations)
print("Final weights: " + str(model.weights))
print("Final intercept: " + str(model.intercept))
sc.stop()
| apache-2.0 |
ksmaheshkumar/pysecdump | wpc/users.py | 6 | 1807 | from wpc.user import user
import win32net
import wpc.conf
class users():
def __init__(self):
self.users = []
    def get_filtered(self):
if self.users == []:
#try:
level = 1
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
return self.users
def get_all(self):
if self.users == []:
#try:
level = 0
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
#except:
# print "[E] NetUserEnum failed"
return self.users
| gpl-3.0 |
liu602348184/django | tests/model_inheritance/models.py | 99 | 4729 | """
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
#
# Abstract base classes
#
@python_2_unicode_compatible
class CommonInfo(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ['name']
def __str__(self):
return '%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
job = models.CharField(max_length=50)
class Student(CommonInfo):
school_class = models.CharField(max_length=10)
class Meta:
pass
#
# Abstract base classes with related models
#
class Post(models.Model):
title = models.CharField(max_length=50)
@python_2_unicode_compatible
class Attachment(models.Model):
post = models.ForeignKey(Post, related_name='attached_%(class)s_set')
content = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.content
class Comment(Attachment):
is_spam = models.BooleanField(default=False)
class Link(Attachment):
url = models.URLField()
#
# Multi-table inheritance
#
@python_2_unicode_compatible
class Chef(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "%s the chef" % self.name
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return "%s the place" % self.name
class Rating(models.Model):
rating = models.IntegerField(null=True, blank=True)
class Meta:
abstract = True
ordering = ['-rating']
@python_2_unicode_compatible
class Restaurant(Place, Rating):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
chef = models.ForeignKey(Chef, null=True, blank=True)
class Meta(Rating.Meta):
db_table = 'my_restaurant'
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class Supplier(Place):
customers = models.ManyToManyField(Restaurant, related_name='provider')
def __str__(self):
return "%s the supplier" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
main_site = models.ForeignKey(Place, related_name='lot')
def __str__(self):
return "%s the parking lot" % self.name
#
# Abstract base classes with related models where the sub-class has the
# same name in a different app and inherits from the same abstract base
# class.
# NOTE: The actual API tests for the following classes are in
# model_inheritance_same_model_name/models.py - They are defined
# here in order to have the name conflict between apps
#
class Title(models.Model):
title = models.CharField(max_length=50)
class NamedURL(models.Model):
title = models.ForeignKey(Title, related_name='attached_%(app_label)s_%(class)s_set')
url = models.URLField()
class Meta:
abstract = True
@python_2_unicode_compatible
class Copy(NamedURL):
content = models.TextField()
def __str__(self):
return self.content
class Mixin(object):
def __init__(self):
self.other_attr = 1
super(Mixin, self).__init__()
class MixinModel(models.Model, Mixin):
pass
class Base(models.Model):
titles = models.ManyToManyField(Title)
class SubBase(Base):
sub_id = models.IntegerField(primary_key=True)
class GrandParent(models.Model):
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
email = models.EmailField(unique=True)
class Meta:
unique_together = ('first_name', 'last_name')
class Parent(GrandParent):
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
| bsd-3-clause |
muffinresearch/olympia | apps/addons/helpers.py | 17 | 10485 | import jinja2
from jingo import register
from tower import ugettext as _
from . import buttons
from amo.utils import chunked
register.function(buttons.install_button)
register.function(buttons.big_install_button)
register.function(buttons.mobile_install_button)
@register.filter
@jinja2.contextfilter
def statusflags(context, addon):
"""unreviewed/recommended status flags for use as CSS classes"""
app = context['APP']
lang = context['LANG']
if addon.is_unreviewed():
return 'unreviewed'
elif addon.is_featured(app, lang):
return 'featuredaddon'
else:
return ''
@register.filter
@jinja2.contextfilter
def flag(context, addon):
"""unreviewed/recommended flag heading."""
status = statusflags(context, addon)
msg = {'unreviewed': _('Not Reviewed'), 'featuredaddon': _('Featured')}
if status:
return jinja2.Markup(u'<h5 class="flag">%s</h5>' % msg[status])
else:
return ''
@register.inclusion_tag('addons/performance_note.html')
@jinja2.contextfunction
def performance_note(context, amount, listing=False):
return new_context(**locals())
@register.inclusion_tag('addons/impala/performance_note.html')
@jinja2.contextfunction
def impala_performance_note(context, amount, listing=False):
return new_context(**locals())
@register.inclusion_tag('addons/impala/dependencies_note.html')
@jinja2.contextfunction
def dependencies_note(context, addon, module_context='impala'):
return new_context(**locals())
@register.inclusion_tag('addons/contribution.html')
@jinja2.contextfunction
def contribution(context, addon, text=None, src='', show_install=False,
show_help=True, large=False, contribution_src=None):
"""
Show a contribution box.
Parameters:
addon
text: The begging text at the top of the box.
src: The page where the contribution link is coming from.
show_install: Whether or not to show the install button.
show_help: Show "What's this?" link?
contribution_src: The source for the contribution src,
will use src if not provided.
"""
if not contribution_src:
contribution_src = src
has_suggested = bool(addon.suggested_amount)
return new_context(**locals())
@register.inclusion_tag('addons/impala/contribution.html')
@jinja2.contextfunction
def impala_contribution(context, addon, text=None, src='', show_install=False,
show_help=True, large=False, contribution_src=None):
"""
Show a contribution box.
Parameters:
addon
text: The begging text at the top of the box.
src: The page where the contribution link is coming from.
show_install: Whether or not to show the install button.
show_help: Show "What's this?" link?
contribution_src: The source for the contribution src,
will use src if not provided.
"""
if not contribution_src:
contribution_src = src
has_suggested = bool(addon.suggested_amount)
return new_context(**locals())
@register.inclusion_tag('addons/review_list_box.html')
@jinja2.contextfunction
def review_list_box(context, addon, reviews):
"""Details page: Show a box with three add-on reviews."""
c = dict(context.items())
c.update(addon=addon, reviews=reviews)
return c
@register.inclusion_tag('addons/impala/review_list_box.html')
@jinja2.contextfunction
def impala_review_list_box(context, addon, reviews):
"""Details page: Show a box with three add-on reviews."""
c = dict(context.items())
c.update(addon=addon, reviews=reviews)
return c
@register.inclusion_tag('addons/review_add_box.html')
@jinja2.contextfunction
def review_add_box(context, addon):
"""Details page: Show a box for the user to post a review."""
c = dict(context.items())
c['addon'] = addon
return c
@register.inclusion_tag('addons/impala/review_add_box.html')
@jinja2.contextfunction
def impala_review_add_box(context, addon):
"""Details page: Show a box for the user to post a review."""
c = dict(context.items())
c['addon'] = addon
return c
@register.inclusion_tag('addons/tags_box.html')
@jinja2.contextfunction
def tags_box(context, addon, tags=None):
"""
Details page: Show a box with existing tags along with a form to add new
ones.
"""
c = dict(context.items())
c.update({'addon': addon,
'tags': tags})
return c
@register.inclusion_tag('addons/listing/items.html')
@jinja2.contextfunction
def addon_listing_items(context, addons, show_date=False,
show_downloads=False, src=None, notes={}):
return new_context(**locals())
@register.inclusion_tag('addons/impala/listing/items.html')
@jinja2.contextfunction
def impala_addon_listing_items(context, addons, field=None, src=None,
dl_src=None, notes={}):
if not src:
src = context.get('src')
if not dl_src:
dl_src = context.get('dl_src', src)
return new_context(**locals())
@register.inclusion_tag('addons/listing/items_compact.html')
@jinja2.contextfunction
def addon_listing_items_compact(context, addons, show_date=False, src=None):
return new_context(**locals())
@register.inclusion_tag('addons/listing/items_mobile.html')
@jinja2.contextfunction
def addon_listing_items_mobile(context, addons, sort=None, src=None):
return new_context(**locals())
@register.inclusion_tag('addons/listing_header.html')
@jinja2.contextfunction
def addon_listing_header(context, url_base, sort_opts, selected):
return new_context(**locals())
@register.inclusion_tag('addons/impala/listing/sorter.html')
@jinja2.contextfunction
def impala_addon_listing_header(context, url_base, sort_opts={}, selected=None,
extra_sort_opts={}, search_filter=None):
if search_filter:
selected = search_filter.field
sort_opts = search_filter.opts
if hasattr(search_filter, 'extras'):
extra_sort_opts = search_filter.extras
# When an "extra" sort option becomes selected, it will appear alongside
# the normal sort options.
old_extras = extra_sort_opts
sort_opts, extra_sort_opts = list(sort_opts), []
for k, v in old_extras:
if k == selected:
sort_opts.append((k, v, True))
else:
extra_sort_opts.append((k, v))
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/sidebar_listing.html')
def sidebar_listing(context, addon):
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/addon_hovercard.html')
def addon_hovercard(context, addon, lazyload=False, src=None, dl_src=None):
if not src:
src = context.get('src')
if not dl_src:
dl_src = context.get('dl_src', src)
vital_summary = context.get('vital_summary') or 'rating'
vital_more = context.get('vital_more')
if 'vital_more' not in context:
vital_more = 'adu'
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/addon_grid.html')
def addon_grid(context, addons, src=None, dl_src=None, pagesize=6, cols=2,
vital_summary='rating', vital_more='adu'):
if not src:
src = context.get('src')
# dl_src is an optional src parameter just for the download links
if not dl_src:
dl_src = context.get('dl_src', src)
pages = chunked(addons, pagesize)
columns = 'cols-%d' % cols
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/featured_grid.html')
def featured_grid(context, addons, src=None, dl_src=None, pagesize=3, cols=3):
if not src:
src = context.get('src')
    # dl_src is an optional src parameter just for the download links
if not dl_src:
dl_src = src
pages = chunked(addons, pagesize)
columns = '' if cols != 3 else 'three-col'
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/toplist.html')
def addon_toplist(context, addons, vital='users', src=None):
return new_context(**locals())
def new_context(context, **kw):
c = dict(context.items())
c.update(kw)
return c
@register.inclusion_tag('addons/persona_preview.html')
@jinja2.contextfunction
def persona_preview(context, persona, size='large', linked=True, extra=None,
details=False, title=False, caption=False, url=None):
preview_map = {'large': persona.preview_url,
'small': persona.thumb_url}
addon = persona.addon
c = dict(context.items())
c.update({'persona': persona, 'addon': addon, 'linked': linked,
'size': size, 'preview': preview_map[size], 'extra': extra,
'details': details, 'title': title, 'caption': caption,
'url_': url})
return c
@register.inclusion_tag('addons/mobile/persona_preview.html')
@jinja2.contextfunction
def mobile_persona_preview(context, persona):
addon = persona.addon
c = dict(context.items())
c.update({'persona': persona, 'addon': addon})
return c
@register.inclusion_tag('addons/mobile/persona_confirm.html')
@jinja2.contextfunction
def mobile_persona_confirm(context, persona, size='large'):
addon = persona.addon
c = dict(context.items())
c.update({'persona': persona, 'addon': addon, 'size': size})
return c
@register.inclusion_tag('addons/persona_grid.html')
@jinja2.contextfunction
def persona_grid(context, addons):
return new_context(**locals())
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/persona_grid.html')
def impala_persona_grid(context, personas, src=None, pagesize=6, cols=3):
c = dict(context.items())
return dict(pages=chunked(personas, pagesize),
columns='cols-%d' % cols, **c)
@register.filter
@jinja2.contextfilter
@register.inclusion_tag('addons/impala/theme_grid.html')
def theme_grid(context, themes, src=None, dl_src=None):
src = context.get('src', src)
if not dl_src:
dl_src = context.get('dl_src', src)
return new_context(**locals())
@register.inclusion_tag('addons/report_abuse.html')
@jinja2.contextfunction
def addon_report_abuse(context, hide, addon):
return new_context(**locals())
| bsd-3-clause |
kawamon/hue | desktop/core/ext-py/thrift-0.13.0/src/transport/TTransport.py | 13 | 13205 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from thrift.Thrift import TException
from ..compat import BufferIO
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
NEGATIVE_SIZE = 5
SIZE_LIMIT = 6
INVALID_CLIENT_TYPE = 7
def __init__(self, type=UNKNOWN, message=None, inner=None):
TException.__init__(self, message)
self.type = type
self.inner = inner
class TTransportBase(object):
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = b''
have = 0
while (have < sz):
chunk = self.read(sz - have)
chunkLen = len(chunk)
have += chunkLen
buff += chunk
if chunkLen == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport(object):
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
class TServerTransportBase(object):
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase(object):
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory(object):
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
The implementation uses a (configurable) fixed-size read buffer
but buffers all writes until a flush is performed.
"""
DEFAULT_BUFFER = 4096
def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
self.__trans = trans
self.__wbuf = BufferIO()
# Pass string argument to initialize read buffer as cStringIO.InputType
self.__rbuf = BufferIO(b'')
self.__rbuf_size = rbuf_size
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = BufferIO(self.__trans.read(max(sz, self.__rbuf_size)))
return self.__rbuf.read(sz)
def write(self, buf):
try:
self.__wbuf.write(buf)
except Exception as e:
# on exception reset wbuf so it doesn't contain a partial function call
self.__wbuf = BufferIO()
raise e
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = BufferIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.__rbuf_size:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.__rbuf_size)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = BufferIO(retstring)
return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cBytesIO object as a TTransport.
NOTE: Unlike the C++ version of this class, you cannot write to it
then immediately read from it. If you want to read from a
    TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None, offset=0):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = BufferIO(value)
else:
self._buffer = BufferIO()
if offset:
self._buffer.seek(offset)
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
class TFramedTransportFactory(object):
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
    def __init__(self, trans):
self.__trans = trans
self.__rbuf = BufferIO(b'')
self.__wbuf = BufferIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = BufferIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = BufferIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = BufferIO(prefix)
return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
class TSaslClientTransport(TTransportBase, CReadableTransport):
"""
SASL transport
"""
START = 1
OK = 2
BAD = 3
ERROR = 4
COMPLETE = 5
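    # Each negotiation message on the wire is a 1-byte status code followed
    # by a 4-byte big-endian payload length and then the payload itself
    # (see send_sasl_msg/recv_sasl_msg below).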
def __init__(self, transport, host, service, mechanism='GSSAPI',
**sasl_kwargs):
"""
transport: an underlying transport to use, typically just a TSocket
host: the name of the server, from a SASL perspective
service: the name of the server's service, from a SASL perspective
mechanism: the name of the preferred mechanism to use
All other kwargs will be passed to the puresasl.client.SASLClient
constructor.
"""
from puresasl.client import SASLClient
self.transport = transport
self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs)
self.__wbuf = BufferIO()
self.__rbuf = BufferIO(b'')
def open(self):
if not self.transport.isOpen():
self.transport.open()
self.send_sasl_msg(self.START, bytes(self.sasl.mechanism, 'ascii'))
self.send_sasl_msg(self.OK, self.sasl.process())
while True:
status, challenge = self.recv_sasl_msg()
if status == self.OK:
self.send_sasl_msg(self.OK, self.sasl.process(challenge))
elif status == self.COMPLETE:
if not self.sasl.complete:
raise TTransportException(
TTransportException.NOT_OPEN,
"The server erroneously indicated "
"that SASL negotiation was complete")
else:
break
else:
raise TTransportException(
TTransportException.NOT_OPEN,
"Bad SASL negotiation status: %d (%s)"
% (status, challenge))
def send_sasl_msg(self, status, body):
header = pack(">BI", status, len(body))
self.transport.write(header + body)
self.transport.flush()
def recv_sasl_msg(self):
header = self.transport.readAll(5)
status, length = unpack(">BI", header)
if length > 0:
payload = self.transport.readAll(length)
else:
payload = ""
return status, payload
def write(self, data):
self.__wbuf.write(data)
def flush(self):
data = self.__wbuf.getvalue()
encoded = self.sasl.wrap(data)
self.transport.write(pack("!i", len(encoded)) + encoded)
self.transport.flush()
self.__wbuf = BufferIO()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self._read_frame()
return self.__rbuf.read(sz)
def _read_frame(self):
header = self.transport.readAll(4)
length, = unpack('!i', header)
encoded = self.transport.readAll(length)
self.__rbuf = BufferIO(self.sasl.unwrap(encoded))
def close(self):
self.sasl.dispose()
self.transport.close()
# based on TFramedTransport
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self._read_frame()
prefix += self.__rbuf.getvalue()
self.__rbuf = BufferIO(prefix)
return self.__rbuf
| apache-2.0 |
Elbagoury/odoo | addons/account_payment/report/payment_order.py | 378 | 2974 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class payment_order(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(payment_order, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'get_invoice_name': self._get_invoice_name,
'get_amount_total_in_currency': self._get_amount_total_in_currency,
'get_amount_total': self._get_amount_total,
'get_account_name': self._get_account_name,
})
def _get_invoice_name(self, invoice_id):
if invoice_id:
value_name = self.pool['account.invoice'].name_get(self.cr, self.uid, [invoice_id])
if value_name:
return value_name[0][1]
return False
def _get_amount_total_in_currency(self, payment):
total = 0.0
if payment.line_ids:
currency_cmp = payment.line_ids[0].currency.id
else:
return False
for line in payment.line_ids:
if currency_cmp == line.currency.id:
total += line.amount_currency
else:
return False
return total
def _get_amount_total(self, payment):
total = 0.0
if not payment.line_ids:
return False
for line in payment.line_ids:
total += line.amount
return total
def _get_account_name(self, bank_id):
if bank_id:
value_name = self.pool['res.partner.bank'].name_get(self.cr, self.uid, [bank_id])
if value_name:
return value_name[0][1]
return False
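# --- Illustrative sketch with plain stand-in objects, not Odoo records.
# It mirrors the invariant enforced by _get_amount_total_in_currency
# above: a currency total is only returned when every payment line
# carries the same currency; any mismatch (or an empty line list)
# yields False instead of a meaningless mixed-currency sum.
def _demo_total_in_currency():
    class Line(object):
        def __init__(self, currency_id, amount_currency):
            self.currency_id = currency_id
            self.amount_currency = amount_currency

    def total_in_currency(lines):
        if not lines:
            return False
        reference = lines[0].currency_id
        total = 0.0
        for line in lines:
            if line.currency_id != reference:
                return False
            total += line.amount_currency
        return total

    assert total_in_currency([Line(1, 10.0), Line(1, 5.0)]) == 15.0
    assert total_in_currency([Line(1, 10.0), Line(2, 5.0)]) is False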
class report_paymentorder(osv.AbstractModel):
_name = 'report.account_payment.report_paymentorder'
_inherit = 'report.abstract_report'
_template = 'account_payment.report_paymentorder'
_wrapped_report_class = payment_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
uranusjr/django | django/urls/conf.py | 90 | 2946 | """Functions for use in URLconfs."""
from functools import partial
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from .resolvers import (
LocalePrefixPattern, RegexPattern, RoutePattern, URLPattern, URLResolver,
)
def include(arg, namespace=None):
app_name = None
if isinstance(arg, tuple):
# Callable returning a namespace hint.
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that '
'provides a namespace.'
)
raise ImproperlyConfigured(
'Passing a %d-tuple to include() is not supported. Pass a '
'2-tuple containing the list of patterns and app_name, and '
'provide the namespace argument to include() instead.' % len(arg)
)
else:
# No namespace hint - use manually provided namespace.
urlconf_module = arg
if isinstance(urlconf_module, str):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
raise ImproperlyConfigured(
'Specifying a namespace in include() without providing an app_name '
'is not supported. Set the app_name attribute in the included '
'module, or pass a 2-tuple containing the list of patterns and '
'app_name instead.',
)
namespace = namespace or app_name
# Make sure the patterns can be iterated through (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
pattern = getattr(url_pattern, 'pattern', None)
if isinstance(pattern, LocalePrefixPattern):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.'
)
return (urlconf_module, app_name, namespace)
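# --- Illustrative sketch (hypothetical "polls" app, commented out because
# it needs a real project). include() accepts either a dotted module path,
# whose app_name attribute supplies the application namespace, or an
# explicit (urlconf, app_name) 2-tuple; the instance namespace always
# comes from the separate namespace argument:
#
#   urlpatterns = [
#       path('polls/', include('polls.urls')),
#       path('v2/', include(('polls.urls', 'polls'), namespace='polls-v2')),
#   ]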
def _path(route, view, kwargs=None, name=None, Pattern=None):
if isinstance(view, (list, tuple)):
# For include(...) processing.
pattern = Pattern(route, is_endpoint=False)
urlconf_module, app_name, namespace = view
return URLResolver(
pattern,
urlconf_module,
kwargs,
app_name=app_name,
namespace=namespace,
)
elif callable(view):
pattern = Pattern(route, name=name, is_endpoint=True)
return URLPattern(pattern, view, kwargs, name)
else:
raise TypeError('view must be a callable or a list/tuple in the case of include().')
path = partial(_path, Pattern=RoutePattern)
re_path = partial(_path, Pattern=RegexPattern)
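# --- Illustrative sketch (hypothetical views module, commented out).
# path() and re_path() share _path() and differ only in the Pattern class
# bound via functools.partial: RoutePattern parses converter syntax such
# as <int:year>, while RegexPattern takes a raw regular expression:
#
#   urlpatterns = [
#       path('articles/<int:year>/', views.year_archive, name='year-archive'),
#       re_path(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive),
#   ]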
| bsd-3-clause |
kennethgillen/ansible | lib/ansible/modules/cloud/vmware/vmware_datacenter.py | 70 | 5245 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_datacenter
short_description: Manage VMware vSphere Datacenters
description:
- Manage VMware vSphere Datacenters
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Kamil Szczygiel (@kamsz)"
notes:
- Tested on vSphere 6.0
requirements:
- "python >= 2.6"
- PyVmomi
options:
hostname:
description:
- The hostname or IP address of the vSphere vCenter API server
required: True
username:
description:
- The username of the vSphere vCenter
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the vSphere vCenter
required: True
aliases: ['pass', 'pwd']
datacenter_name:
description:
- The name of the datacenter to create or remove.
required: True
state:
description:
- If the datacenter should be present or absent
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_datacenter command from Ansible Playbooks
- name: Create Datacenter
local_action:
module: vmware_datacenter
hostname: "{{ ansible_ssh_host }}"
username: root
password: vmware
datacenter_name: "datacenter"
state: present
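
# A hypothetical removal task (same assumed credentials as above),
# exercising the module's state=absent branch
- name: Remove Datacenter
  local_action:
    module: vmware_datacenter
    hostname: "{{ ansible_ssh_host }}"
    username: root
    password: vmware
    datacenter_name: "datacenter"
    state: absent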
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def get_datacenter(context, module):
try:
datacenter_name = module.params.get('datacenter_name')
datacenter = find_datacenter_by_name(context, datacenter_name)
return datacenter
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
def create_datacenter(context, module):
datacenter_name = module.params.get('datacenter_name')
folder = context.rootFolder
try:
datacenter = get_datacenter(context, module)
changed = False
if not datacenter:
changed = True
if not module.check_mode:
folder.CreateDatacenter(name=datacenter_name)
module.exit_json(changed=changed)
except vim.fault.DuplicateName:
module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name)
except vim.fault.InvalidName:
module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name)
except vmodl.fault.NotSupported:
# This should never happen
module.fail_json(msg="Trying to create a datacenter on an incorrect folder object")
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
def destroy_datacenter(context, module):
result = None
try:
datacenter = get_datacenter(context, module)
changed = False
if datacenter:
changed = True
if not module.check_mode:
task = datacenter.Destroy_Task()
changed, result = wait_for_task(task)
module.exit_json(changed=changed, result=result)
except vim.fault.VimFault as vim_fault:
module.fail_json(msg=vim_fault.msg)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
datacenter_name=dict(required=True, type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
context = connect_to_api(module)
state = module.params.get('state')
if state == 'present':
create_datacenter(context, module)
if state == 'absent':
destroy_datacenter(context, module)
from ansible.module_utils.basic import *
from ansible.module_utils.vmware import *
if __name__ == '__main__':
main()
| gpl-3.0 |
QianBIG/odoo | addons/auth_ldap/__init__.py | 442 | 1049 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import users_ldap
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |