from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.connectors import (
BaseConnector, ReceiveInboundConnector, ReceiveOutboundConnector,
IgnoreMessage)
from vumi.tests.utils import LogCatcher
from vumi.worker import BaseWorker
from vumi.message import TransportUserMessage
from vumi.middleware.tests.utils import RecordingMiddleware
from vumi.tests.helpers import VumiTestCase, MessageHelper, WorkerHelper
class DummyWorker(BaseWorker):
def setup_connectors(self):
pass
def setup_worker(self):
pass
def teardown_worker(self):
pass
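# DummyWorker stubs out BaseWorker's abstract hooks so that connector
# behaviour can be exercised in isolation by the tests below.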
class BaseConnectorTestCase(VumiTestCase):
connector_class = None
def setUp(self):
self.msg_helper = self.add_helper(MessageHelper())
self.worker_helper = self.add_helper(WorkerHelper())
@inlineCallbacks
def mk_connector(self, worker=None, connector_name=None,
prefetch_count=None, middlewares=None, setup=False):
if worker is None:
worker = yield self.worker_helper.get_worker(DummyWorker, {})
if connector_name is None:
connector_name = "dummy_connector"
connector = self.connector_class(worker, connector_name,
prefetch_count=prefetch_count,
middlewares=middlewares)
if setup:
yield connector.setup()
returnValue(connector)
@inlineCallbacks
def mk_consumer(self, *args, **kwargs):
conn = yield self.mk_connector(*args, **kwargs)
consumer = yield conn._setup_consumer('inbound', TransportUserMessage,
lambda msg: None)
returnValue((conn, consumer))
class TestBaseConnector(BaseConnectorTestCase):
connector_class = BaseConnector
@inlineCallbacks
def test_creation(self):
conn = yield self.mk_connector(connector_name="foo")
self.assertEqual(conn.name, "foo")
self.assertTrue(isinstance(conn.worker, BaseWorker))
@inlineCallbacks
def test_middlewares_consume(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn, consumer = yield self.mk_consumer(
worker=worker, connector_name='foo', middlewares=middlewares)
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[(str(i), 'inbound', 'foo')
for i in range(3)])
@inlineCallbacks
def test_middlewares_publish(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn = yield self.mk_connector(
worker=worker, connector_name='foo', middlewares=middlewares)
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[[str(i), 'outbound', 'foo']
for i in range(2, -1, -1)])
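# Note the reversed range: publish middleware is applied in the opposite
# order to consume middleware, which the recorded trace reflects.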
@inlineCallbacks
def test_prefetch_count(self):
conn, consumer = yield self.mk_consumer(prefetch_count=10)
self.assertEqual(consumer.channel.qos_prefetch_count, 10)
@inlineCallbacks
def test_setup_raises(self):
conn = yield self.mk_connector()
self.assertRaises(NotImplementedError, conn.setup)
@inlineCallbacks
def test_teardown(self):
conn, consumer = yield self.mk_consumer()
self.assertTrue(consumer.keep_consuming)
yield conn.teardown()
self.assertFalse(consumer.keep_consuming)
@inlineCallbacks
def test_paused(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(conn.paused)
consumer.unpause()
self.assertFalse(conn.paused)
@inlineCallbacks
def test_pause(self):
conn, consumer = yield self.mk_consumer()
consumer.unpause()
self.assertFalse(consumer.paused)
conn.pause()
self.assertTrue(consumer.paused)
@inlineCallbacks
def test_unpause(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(consumer.paused)
conn.unpause()
self.assertFalse(consumer.paused)
@inlineCallbacks
def test_setup_publisher(self):
conn = yield self.mk_connector(connector_name='foo')
publisher = yield conn._setup_publisher('outbound')
self.assertEqual(publisher.routing_key, 'foo.outbound')
@inlineCallbacks
def test_setup_consumer(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
self.assertTrue(consumer.paused)
self.assertEqual(consumer.routing_key, 'foo.inbound')
self.assertEqual(consumer.message_class, TransportUserMessage)
@inlineCallbacks
def test_set_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, 'dummy_endpoint')
msg = self.msg_helper.make_inbound("inbound")
msg.set_routing_endpoint('dummy_endpoint')
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_none_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, None)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_message_with_endpoint(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
class TestReceiveInboundConnector(BaseConnectorTestCase):
connector_class = ReceiveInboundConnector
@inlineCallbacks
def test_setup(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn.setup()
conn.unpause()
with LogCatcher() as lc:
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
[msg_log] = lc.messages()
self.assertTrue(msg_log.startswith("No inbound handler for 'foo'"))
with LogCatcher() as lc:
event = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(event, 'foo')
[event_log] = lc.messages()
self.assertTrue(event_log.startswith("No event handler for 'foo'"))
msg = self.msg_helper.make_outbound("outbound")
yield conn.publish_outbound(msg)
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_default_inbound_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_inbound_handler(
self.msg_helper.make_inbound("inbound"))
[log] = lc.messages()
self.assertTrue(log.startswith("No inbound handler for 'foo'"))
@inlineCallbacks
def test_default_event_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_event_handler(self.msg_helper.make_ack())
[log] = lc.messages()
self.assertTrue(log.startswith("No event handler for 'foo'"))
@inlineCallbacks
def test_set_inbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_inbound_handler(msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_inbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_inbound_handler(msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_event_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_event_handler(msgs.append)
msg = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_event_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_event_handler(msgs.append)
msg = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_outbound(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_outbound("outbound")
yield conn.publish_outbound(msg)
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_inbound_handler_ignore_message(self):
def im_handler(msg):
raise IgnoreMessage()
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_inbound_handler(im_handler)
msg = self.msg_helper.make_inbound("inbound")
with LogCatcher() as lc:
yield self.worker_helper.dispatch_inbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith(
"Ignoring msg due to IgnoreMessage(): <Message"))
class TestReceiveOutboundConnector(BaseConnectorTestCase):
connector_class = ReceiveOutboundConnector
@inlineCallbacks
def test_setup(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn.setup()
conn.unpause()
with LogCatcher() as lc:
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith("No outbound handler for 'foo'"))
msg = self.msg_helper.make_inbound("inbound")
yield conn.publish_inbound(msg)
msgs = self.worker_helper.get_dispatched_inbound('foo')
self.assertEqual(msgs, [msg])
msg = self.msg_helper.make_ack()
yield conn.publish_event(msg)
msgs = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_default_outbound_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_outbound_handler(
self.msg_helper.make_outbound("outbound"))
[log] = lc.messages()
self.assertTrue(log.startswith("No outbound handler for 'foo'"))
@inlineCallbacks
def test_set_outbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_outbound_handler(msgs.append)
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_outbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_outbound_handler(msgs.append)
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_inbound(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_inbound("inbound")
yield conn.publish_inbound(msg)
msgs = self.worker_helper.get_dispatched_inbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_event(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_ack()
yield conn.publish_event(msg)
msgs = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_outbound_handler_nack_message(self):
def im_handler(msg):
raise IgnoreMessage()
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_outbound_handler(im_handler)
msg = self.msg_helper.make_inbound("inbound")
with LogCatcher() as lc:
yield self.worker_helper.dispatch_outbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith(
"Ignoring msg (with NACK) due to IgnoreMessage(): <Message"))
[event] = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(event['event_type'], 'nack')
| TouK/vumi | vumi/tests/test_connectors.py | Python | bsd-3-clause | 14,757 |
#!/usr/bin/env python
# Copyright (c) 2014-2015 Daniel Kraft
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# CLI to solve an auxpow (or not) in regtest difficulty.
import binascii
import hashlib
import sys
def computeAuxpow (block, target, ok):
"""
Build an auxpow object (serialised as hex string) that solves
(ok = True) or doesn't solve (ok = False) the block.
"""
# Start by building the merge-mining coinbase. The merkle tree
# consists only of the block hash as root.
coinbase = "fabe" + binascii.hexlify ("m" * 2)
coinbase += block
coinbase += "01000000" + ("00" * 4)
# Construct "vector" of transaction inputs.
vin = "01"
vin += ("00" * 32) + ("ff" * 4)
vin += ("%02x" % (len (coinbase) / 2)) + coinbase
vin += ("ff" * 4)
# Build up the full coinbase transaction. It consists only
# of the input and has no outputs.
tx = "01000000" + vin + "00" + ("00" * 4)
txHash = doubleHashHex (tx)
# Construct the parent block header. It need not be valid, just good
# enough for auxpow purposes.
header = "01000000"
header += "00" * 32
header += reverseHex (txHash)
header += "00" * 4
header += "00" * 4
header += "00" * 4
# Mine the block.
(header, blockhash) = mineBlock (header, target, ok)
# Build the MerkleTx part of the auxpow.
auxpow = tx
auxpow += blockhash
auxpow += "00"
auxpow += "00" * 4
# Extend to full auxpow.
auxpow += "00"
auxpow += "00" * 4
auxpow += header
return auxpow
def mineBlock (header, target, ok):
"""
Given a block header, update the nonce until it is ok (or not)
for the given target.
"""
data = bytearray (binascii.unhexlify (header))
while True:
assert data[79] < 255
data[79] += 1
hexData = binascii.hexlify (data)
blockhash = doubleHashHex (hexData)
if (ok and blockhash < target) or ((not ok) and blockhash > target):
break
return (hexData, blockhash)
def doubleHashHex (data):
"""
Perform Crowncoin's Double-SHA256 hash on the given hex string.
"""
hasher = hashlib.sha256 ()
hasher.update (binascii.unhexlify (data))
data = hasher.digest ()
hasher = hashlib.sha256 ()
hasher.update (data)
return reverseHex (hasher.hexdigest ())
def reverseHex (data):
"""
Flip byte order in the given data (hex string).
"""
b = bytearray (binascii.unhexlify (data))
b.reverse ()
return binascii.hexlify (b)
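# Quick sanity check (illustrative): byte order is flipped pairwise, e.g.
# >>> reverseHex ("01020304")
# '04030201'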
################################################################################
if len (sys.argv) != 4:
print "Usage: solveauxpow.py HASH _TARGET OK"
sys.exit ()
blockHash = sys.argv[1]
revTarget = sys.argv[2]
ok = sys.argv[3]
if ok not in ["true", "false"]:
print "expected 'true' or 'false' as OK value"
sys.exit ()
target = reverseHex (revTarget)
ok = (ok == "true")
res = computeAuxpow (blockHash, target, ok)
print res
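# Example invocation (illustrative placeholder values):
#   python solveauxpow.py <block-hash-hex> <byte-reversed-target-hex> true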
| cerebrus29301/crowncoin | contrib/solveauxpow.py | Python | mit | 3,047 |
'''Trains two recurrent neural networks based upon a story and a question.
The resulting merged vector is then queried to answer a range of bAbI tasks.
The results are comparable to those for an LSTM model provided in Weston et al.:
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks"
http://arxiv.org/abs/1502.05698
Task Number | FB LSTM Baseline | Keras QA
--- | --- | ---
QA1 - Single Supporting Fact | 50 | 100.0
QA2 - Two Supporting Facts | 20 | 50.0
QA3 - Three Supporting Facts | 20 | 20.5
QA4 - Two Arg. Relations | 61 | 62.9
QA5 - Three Arg. Relations | 70 | 61.9
QA6 - Yes/No Questions | 48 | 50.7
QA7 - Counting | 49 | 78.9
QA8 - Lists/Sets | 45 | 77.2
QA9 - Simple Negation | 64 | 64.0
QA10 - Indefinite Knowledge | 44 | 47.7
QA11 - Basic Coreference | 72 | 74.9
QA12 - Conjunction | 74 | 76.4
QA13 - Compound Coreference | 94 | 94.4
QA14 - Time Reasoning | 27 | 34.8
QA15 - Basic Deduction | 21 | 32.4
QA16 - Basic Induction | 23 | 50.6
QA17 - Positional Reasoning | 51 | 49.1
QA18 - Size Reasoning | 52 | 90.8
QA19 - Path Finding | 8 | 9.0
QA20 - Agent's Motivations | 91 | 90.7
For the resources related to the bAbI project, refer to:
https://research.facebook.com/researchers/1543934539189348
Notes:
- With default word, sentence, and query vector sizes, the GRU model achieves:
- 100% test accuracy on QA1 in 20 epochs (2 seconds per epoch on CPU)
- 50% test accuracy on QA2 in 20 epochs (16 seconds per epoch on CPU)
In comparison, the Facebook paper achieves 50% and 20% for the LSTM baseline.
- The task does not traditionally parse the question separately. This likely
improves accuracy and is a good example of merging two RNNs.
- The word vector embeddings are not shared between the story and question RNNs.
- See how the accuracy changes given 10,000 training samples (en-10k) instead
of only 1000. 1000 was used in order to be comparable to the original paper.
- Experiment with GRU, LSTM, and JZS1-3 as they give subtly different results.
- The length and noise (i.e. 'useless' story components) impact the ability of
LSTMs / GRUs to provide the correct answer. Given only the supporting facts,
these RNNs can achieve 100% accuracy on many tasks. Memory networks and neural
networks that use attentional processes can efficiently search through this
noise to find the relevant statements, improving performance substantially.
This becomes especially obvious on QA2 and QA3, both far longer than QA1.
'''
from __future__ import print_function
from functools import reduce
import re
import tarfile
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from keras.layers import Dense, Merge, Dropout, RepeatVector
from keras.layers import recurrent
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
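# Lines in a bAbI task file look like this (the question line carries the
# answer and the supporting line ids, tab-separated):
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?        bathroom        1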
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
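# flatten concatenates the tokenised sentences of a story into one token
# list, e.g. [['Mary', 'moved', '.'], ['John', 'went', '.']] becomes
# ['Mary', 'moved', '.', 'John', 'went', '.'].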
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)
RNN = recurrent.LSTM
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 40
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN, EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE, QUERY_HIDDEN_SIZE))
try:
path = get_file('babi-tasks-v1-2.tar.gz', origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
tar = tarfile.open(path)
# Default QA1 with 1000 samples
# challenge = 'tasks_1-20_v1-2/en/qa1_single-supporting-fact_{}.txt'
# QA1 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt'
# QA2 with 1000 samples
challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'
# QA2 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt'
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train + test)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
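# Example: vocab = ['bathroom', 'hallway', ...] yields
# word_idx = {'bathroom': 1, 'hallway': 2, ...}; index 0 stays free as the
# padding value used by pad_sequences.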
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
X, Xq, Y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
tX, tXq, tY = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)
print('vocab = {}'.format(vocab))
print('X.shape = {}'.format(X.shape))
print('Xq.shape = {}'.format(Xq.shape))
print('Y.shape = {}'.format(Y.shape))
print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen))
print('Build model...')
sentrnn = Sequential()
sentrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE,
input_length=story_maxlen))
sentrnn.add(Dropout(0.3))
qrnn = Sequential()
qrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE,
input_length=query_maxlen))
qrnn.add(Dropout(0.3))
qrnn.add(RNN(EMBED_HIDDEN_SIZE, return_sequences=False))
qrnn.add(RepeatVector(story_maxlen))
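# Both branches now emit tensors of shape
# (batch, story_maxlen, EMBED_HIDDEN_SIZE): the story as an embedded
# sequence, and the query as its final RNN state repeated story_maxlen
# times, which is what makes the element-wise sum merge below valid.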
model = Sequential()
model.add(Merge([sentrnn, qrnn], mode='sum'))
model.add(RNN(EMBED_HIDDEN_SIZE, return_sequences=False))
model.add(Dropout(0.3))
model.add(Dense(vocab_size, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Training')
model.fit([X, Xq], Y, batch_size=BATCH_SIZE, nb_epoch=EPOCHS, validation_split=0.05)
loss, acc = model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
| kemaswill/keras | examples/babi_rnn.py | Python | mit | 8,596 |
version_info = (1, 9, 'dev0')
__version__ = '.'.join(map(str, version_info))
| fevangelista/pyWicked | external/pybind11/pybind11/_version.py | Python | mit | 77 |
from __future__ import print_function
__title__ = 'pif.utils'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ensure_autodiscover', 'list_checkers', 'get_public_ip')
from pif.base import registry
from pif.discover import autodiscover
def ensure_autodiscover():
"""
Ensures the IP checkers are discovered.
"""
if not registry._registry:
autodiscover()
def list_checkers():
"""
Lists available checkers.
:return list:
"""
return registry._registry.keys()
def get_public_ip(preferred_checker=None, verbose=False):
"""
Gets IP using one of the services.
:param str preferred_checker: Checker UID. If given, the preferred checker is used.
:param bool verbose: If set to True, debug info is printed.
:return str:
"""
ensure_autodiscover()
# Use the preferred checker if one was given.
if preferred_checker:
ip_checker_cls = registry.get(preferred_checker)
if not ip_checker_cls:
return False
ip_checker = ip_checker_cls(verbose=verbose)
ip = ip_checker.get_public_ip()
if verbose:
print('provider: ', ip_checker_cls)
return ip
# Using all checkers.
for ip_checker_name, ip_checker_cls in registry._registry.items():
ip_checker = ip_checker_cls(verbose=verbose)
try:
ip = ip_checker.get_public_ip()
if ip:
if verbose:
print('provider: ', ip_checker_cls)
return ip
except Exception as e:
if verbose:
print(e)
return False
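# Minimal usage sketch (the checker UID below is hypothetical; real UIDs
# come from list_checkers()):
#
#   from pif.utils import get_public_ip
#   ip = get_public_ip(preferred_checker='some_checker_uid', verbose=True)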
| djabber/Dashboard | bottle/dash/local/lib/pif-0.7/src/pif/utils.py | Python | mit | 1,686 |
from abc import abstractmethod
import sys, abc
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
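# On Python >= 3.4, abc.ABC is a ready-made abstract base class; on older
# versions an equivalent base is synthesised through the ABCMeta metaclass,
# so Env can subclass ABC uniformly under either interpreter.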
import numpy as np
from enum import Enum
class Env(ABC):
class Terminate(Enum):
Null = 0
Fail = 1
Succ = 2
def __init__(self, args, enable_draw):
self.enable_draw = enable_draw
return
| MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/deep_mimic/env/env.py | Python | mit | 348 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
VERSION = "1.0.0b3"
| Azure/azure-sdk-for-python | sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/_version.py | Python | mit | 488 |
def uniquer(seq, idfun=None):
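# Order-preserving de-duplication; idfun maps each item to the key used for
# the "seen" test, e.g. uniquer(['a', 'A', 'b'], str.lower) -> ['a', 'b'].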
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
| audreyr/opencomparison | package/utils.py | Python | mit | 269 |
from gettext import gettext as _
import traceback
from pulp.client.commands.repo.sync_publish import StatusRenderer
from pulp.client.extensions.core import COLOR_FAILURE
from pulp_puppet.common import constants
from pulp_puppet.common.publish_progress import PublishProgressReport
from pulp_puppet.common.sync_progress import SyncProgressReport
class PuppetStatusRenderer(StatusRenderer):
def __init__(self, context):
super(PuppetStatusRenderer, self).__init__(context)
# Sync Steps
self.sync_metadata_last_state = constants.STATE_NOT_STARTED
self.sync_modules_last_state = constants.STATE_NOT_STARTED
# Publish Steps
self.publish_modules_last_state = constants.STATE_NOT_STARTED
self.publish_metadata_last_state = constants.STATE_NOT_STARTED
self.publish_http_last_state = constants.STATE_NOT_STARTED
self.publish_https_last_state = constants.STATE_NOT_STARTED
# UI Widgets
self.sync_metadata_bar = self.prompt.create_progress_bar()
self.sync_modules_bar = self.prompt.create_progress_bar()
self.publish_modules_bar = self.prompt.create_progress_bar()
self.publish_metadata_spinner = self.prompt.create_spinner()
def display_report(self, progress_report):
# Sync Steps
if constants.IMPORTER_ID in progress_report:
sync_report = SyncProgressReport.from_progress_dict(progress_report[constants.IMPORTER_ID])
self._display_sync_metadata_step(sync_report)
self._display_sync_modules_step(sync_report)
# Publish Steps
if constants.DISTRIBUTOR_ID in progress_report:
publish_report = PublishProgressReport.from_progress_dict(progress_report[constants.DISTRIBUTOR_ID])
self._display_publish_modules_step(publish_report)
self._display_publish_metadata_step(publish_report)
self._display_publish_http_https_step(publish_report)
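# The raw progress_report is keyed by plugin id, roughly:
#   {constants.IMPORTER_ID: {...sync progress...},
#    constants.DISTRIBUTOR_ID: {...publish progress...}}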
def _display_sync_metadata_step(self, sync_report):
# Do nothing if it hasn't started yet or has already finished
if sync_report.metadata_state == constants.STATE_NOT_STARTED or \
self.sync_metadata_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.sync_metadata_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Downloading metadata...'), tag='download-metadata')
# Same behavior for running or success
if sync_report.metadata_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = sync_report.metadata_query_finished_count
items_total = sync_report.metadata_query_total_count
item_type = _('Metadata Query')
self._render_itemized_in_progress_state(items_done, items_total,
item_type, self.sync_metadata_bar, sync_report.metadata_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(sync_report.metadata_error_message,
sync_report.metadata_exception,
sync_report.metadata_traceback)
# Before finishing update the state
self.sync_metadata_last_state = sync_report.metadata_state
def _display_sync_modules_step(self, sync_report):
# Do nothing if it hasn't started yet or has already finished
if sync_report.modules_state == constants.STATE_NOT_STARTED or \
self.sync_modules_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.sync_modules_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Downloading new modules...'), tag='downloading')
# Same behavior for running or success
if sync_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = sync_report.modules_finished_count + sync_report.modules_error_count
items_total = sync_report.modules_total_count
item_type = _('Module')
self._render_itemized_in_progress_state(items_done, items_total, item_type,
self.sync_modules_bar, sync_report.modules_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(sync_report.modules_error_message,
sync_report.modules_exception,
sync_report.modules_traceback)
# Regardless of success or failure, display any individual module errors
# if the new state is complete
if sync_report.modules_state in constants.COMPLETE_STATES:
self._render_module_errors(sync_report.modules_individual_errors)
# Before finishing update the state
self.sync_modules_last_state = sync_report.modules_state
def _display_publish_modules_step(self, publish_report):
# Do nothing if it hasn't started yet or has already finished
if publish_report.modules_state == constants.STATE_NOT_STARTED or \
self.publish_modules_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.publish_modules_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Publishing modules...'), tag='publishing')
# Same behavior for running or success
if publish_report.modules_state in (constants.STATE_RUNNING, constants.STATE_SUCCESS):
items_done = publish_report.modules_finished_count + publish_report.modules_error_count
items_total = publish_report.modules_total_count
item_type = _('Module')
self._render_itemized_in_progress_state(items_done, items_total, item_type,
self.publish_modules_bar, publish_report.modules_state)
# The only state left to handle is if it failed
else:
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(publish_report.modules_error_message,
publish_report.modules_exception,
publish_report.modules_traceback)
# Regardless of success or failure, display any individual module errors
# if the new state is complete
if publish_report.modules_state in constants.COMPLETE_STATES:
self._render_module_errors(publish_report.modules_individual_errors)
# Before finishing update the state
self.publish_modules_last_state = publish_report.modules_state
def _display_publish_metadata_step(self, publish_report):
# Do nothing if it hasn't started yet or has already finished
if publish_report.metadata_state == constants.STATE_NOT_STARTED or \
self.publish_metadata_last_state in constants.COMPLETE_STATES:
return
# Only render this on the first non-not-started state
if self.publish_metadata_last_state == constants.STATE_NOT_STARTED:
self.prompt.write(_('Generating repository metadata...'), tag='generating')
if publish_report.metadata_state == constants.STATE_RUNNING:
self.publish_metadata_spinner.next()
elif publish_report.metadata_state == constants.STATE_SUCCESS:
self.publish_metadata_spinner.next(finished=True)
self.prompt.write(_('... completed'), tag='completed')
self.prompt.render_spacer()
elif publish_report.metadata_state == constants.STATE_FAILED:
self.publish_metadata_spinner.next(finished=True)
self.prompt.render_failure_message(_('... failed'))
self.prompt.render_spacer()
self._render_error(publish_report.modules_error_message,
publish_report.modules_exception,
publish_report.modules_traceback)
self.publish_metadata_last_state = publish_report.metadata_state
def _display_publish_http_https_step(self, publish_report):
# -- HTTP --------
if publish_report.publish_http != constants.STATE_NOT_STARTED and \
self.publish_http_last_state not in constants.COMPLETE_STATES:
self.prompt.write(_('Publishing repository over HTTP...'))
if publish_report.publish_http == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'), tag='http-completed')
elif publish_report.publish_http == constants.STATE_SKIPPED:
self.prompt.write(_('... skipped'), tag='http-skipped')
else:
self.prompt.write(_('... unknown'), tag='http-unknown')
self.publish_http_last_state = publish_report.publish_http
self.prompt.render_spacer()
# -- HTTPS --------
if publish_report.publish_https != constants.STATE_NOT_STARTED and \
self.publish_https_last_state not in constants.COMPLETE_STATES:
self.prompt.write(_('Publishing repository over HTTPS...'))
if publish_report.publish_https == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'), tag='https-completed')
elif publish_report.publish_https == constants.STATE_SKIPPED:
self.prompt.write(_('... skipped'), tag='https-skipped')
else:
self.prompt.write(_('... unknown'), tag='https-unknown')
self.publish_https_last_state = publish_report.publish_https
def _render_itemized_in_progress_state(self, items_done, items_total, type_name,
progress_bar, current_state):
"""
This is a pretty ugly way of reusing similar code between the publish
steps for packages and distributions. There might be a cleaner way
but I was having trouble updating the correct state variable and frankly
I'm out of time. Feel free to fix this if you are inspired.
"""
# For the progress bar to work, we can't write anything after it until
# we're completely finished with it. Assemble the download summary into
# a string and let the progress bar render it.
message_data = {
'name' : type_name.title(),
'items_done' : items_done,
'items_total' : items_total,
}
template = _('%(name)s: %(items_done)s/%(items_total)s items')
bar_message = template % message_data
# If there's nothing to download in this step, flag the bar as complete
if items_total == 0:
items_total = items_done = 1
progress_bar.render(items_done, items_total, message=bar_message)
if current_state == constants.STATE_SUCCESS:
self.prompt.write(_('... completed'))
self.prompt.render_spacer()
def _render_module_errors(self, individual_errors):
"""
:param individual_errors: list of dicts describing each module that
                          failed to import; each dict has the keys
                          'module', 'exception' and 'traceback'.
:type individual_errors: list
"""
if individual_errors:
# TODO: read this from config
display_error_count = 20
self.prompt.render_failure_message(_('Could not import the following modules:'))
for module_error in individual_errors[:display_error_count]:
msg = _(' %(module)s: %(error)s')
msg = msg % {'module': module_error['module'], 'error': module_error['exception']}
self.prompt.write(msg, color=COLOR_FAILURE)
self.prompt.render_spacer()
def _render_error(self, error_message, exception, traceback):
msg = _('The following error was encountered during the previous '
'step. More information can be found by passing the -v flag one or more times.')
self.prompt.render_failure_message(msg)
self.prompt.render_spacer()
self.prompt.render_failure_message(' %s' % error_message)
self.context.logger.error(error_message)
self.context.logger.error(exception)
self.context.logger.error(traceback)
| dkliban/pulp_puppet | pulp_puppet_extensions_admin/pulp_puppet/extensions/admin/repo/status.py | Python | gpl-2.0 | 12,614 |
# asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id: xmas.py 36559 2004-07-18 05:56:09Z tim_one $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "Thomas Gellekum <[email protected]>"
def set_color(win, color):
if curses.has_colors():
n = color + 1
curses.init_pair(n, color, my_bg)
win.attroff(curses.A_COLOR)
win.attron(curses.color_pair(n))
def unset_color(win):
if curses.has_colors():
win.attrset(curses.color_pair(0))
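# curses keeps a global palette of colour pairs and pair 0 is fixed to the
# terminal default, so pairs are registered here as (color + 1) to leave
# pair 0 untouched.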
def look_out(msecs):
curses.napms(msecs)
if stdscr.getch() != -1:
curses.beep()
sys.exit(0)
def boxit():
for y in range(0, 20):
stdscr.addch(y, 7, ord('|'))
for x in range(8, 80):
stdscr.addch(19, x, ord('_'))
for x in range(0, 80):
stdscr.addch(22, x, ord('_'))
return
def seas():
stdscr.addch(4, 1, ord('S'))
stdscr.addch(6, 1, ord('E'))
stdscr.addch(8, 1, ord('A'))
stdscr.addch(10, 1, ord('S'))
stdscr.addch(12, 1, ord('O'))
stdscr.addch(14, 1, ord('N'))
stdscr.addch(16, 1, ord("'"))
stdscr.addch(18, 1, ord('S'))
return
def greet():
stdscr.addch(3, 5, ord('G'))
stdscr.addch(5, 5, ord('R'))
stdscr.addch(7, 5, ord('E'))
stdscr.addch(9, 5, ord('E'))
stdscr.addch(11, 5, ord('T'))
stdscr.addch(13, 5, ord('I'))
stdscr.addch(15, 5, ord('N'))
stdscr.addch(17, 5, ord('G'))
stdscr.addch(19, 5, ord('S'))
return
def fromwho():
stdscr.addstr(21, 13, FROMWHO)
return
def tree():
set_color(treescrn, curses.COLOR_GREEN)
treescrn.addch(1, 11, ord('/'))
treescrn.addch(2, 11, ord('/'))
treescrn.addch(3, 10, ord('/'))
treescrn.addch(4, 9, ord('/'))
treescrn.addch(5, 9, ord('/'))
treescrn.addch(6, 8, ord('/'))
treescrn.addch(7, 7, ord('/'))
treescrn.addch(8, 6, ord('/'))
treescrn.addch(9, 6, ord('/'))
treescrn.addch(10, 5, ord('/'))
treescrn.addch(11, 3, ord('/'))
treescrn.addch(12, 2, ord('/'))
treescrn.addch(1, 13, ord('\\'))
treescrn.addch(2, 13, ord('\\'))
treescrn.addch(3, 14, ord('\\'))
treescrn.addch(4, 15, ord('\\'))
treescrn.addch(5, 15, ord('\\'))
treescrn.addch(6, 16, ord('\\'))
treescrn.addch(7, 17, ord('\\'))
treescrn.addch(8, 18, ord('\\'))
treescrn.addch(9, 18, ord('\\'))
treescrn.addch(10, 19, ord('\\'))
treescrn.addch(11, 21, ord('\\'))
treescrn.addch(12, 22, ord('\\'))
treescrn.addch(4, 10, ord('_'))
treescrn.addch(4, 14, ord('_'))
treescrn.addch(8, 7, ord('_'))
treescrn.addch(8, 17, ord('_'))
treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
treescrn.addstr(14, 11, "| |")
treescrn.addstr(15, 11, "|_|")
unset_color(treescrn)
treescrn.refresh()
w_del_msg.refresh()
return
def balls():
treescrn.overlay(treescrn2)
set_color(treescrn2, curses.COLOR_BLUE)
treescrn2.addch(3, 9, ord('@'))
treescrn2.addch(3, 15, ord('@'))
treescrn2.addch(4, 8, ord('@'))
treescrn2.addch(4, 16, ord('@'))
treescrn2.addch(5, 7, ord('@'))
treescrn2.addch(5, 17, ord('@'))
treescrn2.addch(7, 6, ord('@'))
treescrn2.addch(7, 18, ord('@'))
treescrn2.addch(8, 5, ord('@'))
treescrn2.addch(8, 19, ord('@'))
treescrn2.addch(10, 4, ord('@'))
treescrn2.addch(10, 20, ord('@'))
treescrn2.addch(11, 2, ord('@'))
treescrn2.addch(11, 22, ord('@'))
treescrn2.addch(12, 1, ord('@'))
treescrn2.addch(12, 23, ord('@'))
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def star():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_YELLOW)
treescrn2.addch(0, 12, ord('*'))
treescrn2.standend()
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng1():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(3, 13, ord('\''))
treescrn2.addch(3, 12, ord(':'))
treescrn2.addch(3, 11, ord('.'))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng2():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(5, 14, ord('\''))
treescrn2.addch(5, 13, ord(':'))
treescrn2.addch(5, 12, ord('.'))
treescrn2.addch(5, 11, ord(','))
treescrn2.addch(6, 10, ord('\''))
treescrn2.addch(6, 9, ord(':'))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng3():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(7, 16, ord('\''))
treescrn2.addch(7, 15, ord(':'))
treescrn2.addch(7, 14, ord('.'))
treescrn2.addch(7, 13, ord(','))
treescrn2.addch(8, 12, ord('\''))
treescrn2.addch(8, 11, ord(':'))
treescrn2.addch(8, 10, ord('.'))
treescrn2.addch(8, 9, ord(','))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng4():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(9, 17, ord('\''))
treescrn2.addch(9, 16, ord(':'))
treescrn2.addch(9, 15, ord('.'))
treescrn2.addch(9, 14, ord(','))
treescrn2.addch(10, 13, ord('\''))
treescrn2.addch(10, 12, ord(':'))
treescrn2.addch(10, 11, ord('.'))
treescrn2.addch(10, 10, ord(','))
treescrn2.addch(11, 9, ord('\''))
treescrn2.addch(11, 8, ord(':'))
treescrn2.addch(11, 7, ord('.'))
treescrn2.addch(11, 6, ord(','))
treescrn2.addch(12, 5, ord('\''))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng5():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(11, 19, ord('\''))
treescrn2.addch(11, 18, ord(':'))
treescrn2.addch(11, 17, ord('.'))
treescrn2.addch(11, 16, ord(','))
treescrn2.addch(12, 15, ord('\''))
treescrn2.addch(12, 14, ord(':'))
treescrn2.addch(12, 13, ord('.'))
treescrn2.addch(12, 12, ord(','))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
# save a fully lit tree
treescrn2.overlay(treescrn)
treescrn2.refresh()
w_del_msg.refresh()
return
def blinkit():
treescrn8.touchwin()
# This loop was a switch statement in the C original; the literal `break`
# translation meant only the cycle == 0 branch ever ran. A plain if/elif
# chain lets each blink frame take its turn.
for cycle in range(0, 5):
if cycle == 0:
treescrn3.overlay(treescrn8)
elif cycle == 1:
treescrn4.overlay(treescrn8)
elif cycle == 2:
treescrn5.overlay(treescrn8)
elif cycle == 3:
treescrn6.overlay(treescrn8)
elif cycle == 4:
treescrn7.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
treescrn8.touchwin()
# ALL ON
treescrn.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
return
def deer_step(win, y, x):
win.mvwin(y, x)
win.refresh()
w_del_msg.refresh()
look_out(5)
def reindeer():
y_pos = 0
for x_pos in range(70, 62, -1):
if x_pos < 66: y_pos = 1
for looper in range(0, 4):
dotdeer0.addch(y_pos, x_pos, ord('.'))
dotdeer0.refresh()
w_del_msg.refresh()
dotdeer0.erase()
dotdeer0.refresh()
w_del_msg.refresh()
look_out(50)
y_pos = 2
for x_pos in range(x_pos - 1, 50, -1):
for looper in range(0, 4):
if x_pos < 56:
y_pos = 3
try:
stardeer0.addch(y_pos, x_pos, ord('*'))
except curses.error:
pass
stardeer0.refresh()
w_del_msg.refresh()
stardeer0.erase()
stardeer0.refresh()
w_del_msg.refresh()
else:
dotdeer0.addch(y_pos, x_pos, ord('*'))
dotdeer0.refresh()
w_del_msg.refresh()
dotdeer0.erase()
dotdeer0.refresh()
w_del_msg.refresh()
x_pos = 58
for y_pos in range(2, 5):
lildeer0.touchwin()
lildeer0.refresh()
w_del_msg.refresh()
for looper in range(0, 4):
deer_step(lildeer3, y_pos, x_pos)
deer_step(lildeer2, y_pos, x_pos)
deer_step(lildeer1, y_pos, x_pos)
deer_step(lildeer2, y_pos, x_pos)
deer_step(lildeer3, y_pos, x_pos)
lildeer0.touchwin()
lildeer0.refresh()
w_del_msg.refresh()
x_pos -= 2
x_pos = 35
for y_pos in range(5, 10):
middeer0.touchwin()
middeer0.refresh()
w_del_msg.refresh()
for looper in range(0, 2):
deer_step(middeer3, y_pos, x_pos)
deer_step(middeer2, y_pos, x_pos)
deer_step(middeer1, y_pos, x_pos)
deer_step(middeer2, y_pos, x_pos)
deer_step(middeer3, y_pos, x_pos)
middeer0.touchwin()
middeer0.refresh()
w_del_msg.refresh()
x_pos -= 3
look_out(300)
y_pos = 1
for x_pos in range(8, 16):
deer_step(bigdeer4, y_pos, x_pos)
deer_step(bigdeer3, y_pos, x_pos)
deer_step(bigdeer2, y_pos, x_pos)
deer_step(bigdeer1, y_pos, x_pos)
deer_step(bigdeer2, y_pos, x_pos)
deer_step(bigdeer3, y_pos, x_pos)
deer_step(bigdeer4, y_pos, x_pos)
deer_step(bigdeer0, y_pos, x_pos)
x_pos -= 1
for looper in range(0, 6):
deer_step(lookdeer4, y_pos, x_pos)
deer_step(lookdeer3, y_pos, x_pos)
deer_step(lookdeer2, y_pos, x_pos)
deer_step(lookdeer1, y_pos, x_pos)
deer_step(lookdeer2, y_pos, x_pos)
deer_step(lookdeer3, y_pos, x_pos)
deer_step(lookdeer4, y_pos, x_pos)
deer_step(lookdeer0, y_pos, x_pos)
for y_pos in range(y_pos, 10):
for looper in range(0, 2):
deer_step(bigdeer4, y_pos, x_pos)
deer_step(bigdeer3, y_pos, x_pos)
deer_step(bigdeer2, y_pos, x_pos)
deer_step(bigdeer1, y_pos, x_pos)
deer_step(bigdeer2, y_pos, x_pos)
deer_step(bigdeer3, y_pos, x_pos)
deer_step(bigdeer4, y_pos, x_pos)
deer_step(bigdeer0, y_pos, x_pos)
y_pos -= 1
deer_step(lookdeer3, y_pos, x_pos)
return
def main(win):
global stdscr
stdscr = win
global my_bg, y_pos, x_pos
global treescrn, treescrn2, treescrn3, treescrn4
global treescrn5, treescrn6, treescrn7, treescrn8
global dotdeer0, stardeer0
global lildeer0, lildeer1, lildeer2, lildeer3
global middeer0, middeer1, middeer2, middeer3
global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
global w_holiday, w_del_msg
my_bg = curses.COLOR_BLACK
# curses.curs_set(0)
treescrn = curses.newwin(16, 27, 3, 53)
treescrn2 = curses.newwin(16, 27, 3, 53)
treescrn3 = curses.newwin(16, 27, 3, 53)
treescrn4 = curses.newwin(16, 27, 3, 53)
treescrn5 = curses.newwin(16, 27, 3, 53)
treescrn6 = curses.newwin(16, 27, 3, 53)
treescrn7 = curses.newwin(16, 27, 3, 53)
treescrn8 = curses.newwin(16, 27, 3, 53)
dotdeer0 = curses.newwin(3, 71, 0, 8)
stardeer0 = curses.newwin(4, 56, 0, 8)
lildeer0 = curses.newwin(7, 53, 0, 8)
lildeer1 = curses.newwin(2, 4, 0, 0)
lildeer2 = curses.newwin(2, 4, 0, 0)
lildeer3 = curses.newwin(2, 4, 0, 0)
middeer0 = curses.newwin(15, 42, 0, 8)
middeer1 = curses.newwin(3, 7, 0, 0)
middeer2 = curses.newwin(3, 7, 0, 0)
middeer3 = curses.newwin(3, 7, 0, 0)
bigdeer0 = curses.newwin(10, 23, 0, 0)
bigdeer1 = curses.newwin(10, 23, 0, 0)
bigdeer2 = curses.newwin(10, 23, 0, 0)
bigdeer3 = curses.newwin(10, 23, 0, 0)
bigdeer4 = curses.newwin(10, 23, 0, 0)
lookdeer0 = curses.newwin(10, 25, 0, 0)
lookdeer1 = curses.newwin(10, 25, 0, 0)
lookdeer2 = curses.newwin(10, 25, 0, 0)
lookdeer3 = curses.newwin(10, 25, 0, 0)
lookdeer4 = curses.newwin(10, 25, 0, 0)
w_holiday = curses.newwin(1, 27, 3, 27)
w_del_msg = curses.newwin(1, 20, 23, 60)
try:
w_del_msg.addstr(0, 0, "Hit any key to quit")
except curses.error:
pass
try:
w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
except curses.error:
pass
# set up the windows for our various reindeer
lildeer1.addch(0, 0, ord('V'))
lildeer1.addch(1, 0, ord('@'))
lildeer1.addch(1, 1, ord('<'))
lildeer1.addch(1, 2, ord('>'))
try:
lildeer1.addch(1, 3, ord('~'))
except curses.error:
pass
lildeer2.addch(0, 0, ord('V'))
lildeer2.addch(1, 0, ord('@'))
lildeer2.addch(1, 1, ord('|'))
lildeer2.addch(1, 2, ord('|'))
try:
lildeer2.addch(1, 3, ord('~'))
except curses.error:
pass
lildeer3.addch(0, 0, ord('V'))
lildeer3.addch(1, 0, ord('@'))
lildeer3.addch(1, 1, ord('>'))
lildeer3.addch(1, 2, ord('<'))
try:
lildeer3.addch(1, 3, ord('~')) # was lildeer2 in the original (the XXX bug)
except curses.error:
pass
middeer1.addch(0, 2, ord('y'))
middeer1.addch(0, 3, ord('y'))
middeer1.addch(1, 2, ord('0'))
middeer1.addch(1, 3, ord('('))
middeer1.addch(1, 4, ord('='))
middeer1.addch(1, 5, ord(')'))
middeer1.addch(1, 6, ord('~'))
middeer1.addch(2, 3, ord('\\'))
middeer1.addch(2, 5, ord('/'))
middeer2.addch(0, 2, ord('y'))
middeer2.addch(0, 3, ord('y'))
middeer2.addch(1, 2, ord('0'))
middeer2.addch(1, 3, ord('('))
middeer2.addch(1, 4, ord('='))
middeer2.addch(1, 5, ord(')'))
middeer2.addch(1, 6, ord('~'))
middeer2.addch(2, 3, ord('|'))
middeer2.addch(2, 5, ord('|'))
middeer3.addch(0, 2, ord('y'))
middeer3.addch(0, 3, ord('y'))
middeer3.addch(1, 2, ord('0'))
middeer3.addch(1, 3, ord('('))
middeer3.addch(1, 4, ord('='))
middeer3.addch(1, 5, ord(')'))
middeer3.addch(1, 6, ord('~'))
middeer3.addch(2, 3, ord('/'))
middeer3.addch(2, 5, ord('\\'))
bigdeer1.addch(0, 17, ord('\\'))
bigdeer1.addch(0, 18, ord('/'))
bigdeer1.addch(0, 19, ord('\\'))
bigdeer1.addch(0, 20, ord('/'))
bigdeer1.addch(1, 18, ord('\\'))
bigdeer1.addch(1, 20, ord('/'))
bigdeer1.addch(2, 19, ord('|'))
bigdeer1.addch(2, 20, ord('_'))
bigdeer1.addch(3, 18, ord('/'))
bigdeer1.addch(3, 19, ord('^'))
bigdeer1.addch(3, 20, ord('0'))
bigdeer1.addch(3, 21, ord('\\'))
bigdeer1.addch(4, 17, ord('/'))
bigdeer1.addch(4, 18, ord('/'))
bigdeer1.addch(4, 19, ord('\\'))
bigdeer1.addch(4, 22, ord('\\'))
bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer1.addstr(6, 7, "( \\_____( /") # ))
bigdeer1.addstr(7, 8, "( ) /")
bigdeer1.addstr(8, 9, "\\\\ /")
bigdeer1.addstr(9, 11, "\\>/>")
bigdeer2.addch(0, 17, ord('\\'))
bigdeer2.addch(0, 18, ord('/'))
bigdeer2.addch(0, 19, ord('\\'))
bigdeer2.addch(0, 20, ord('/'))
bigdeer2.addch(1, 18, ord('\\'))
bigdeer2.addch(1, 20, ord('/'))
bigdeer2.addch(2, 19, ord('|'))
bigdeer2.addch(2, 20, ord('_'))
bigdeer2.addch(3, 18, ord('/'))
bigdeer2.addch(3, 19, ord('^'))
bigdeer2.addch(3, 20, ord('0'))
bigdeer2.addch(3, 21, ord('\\'))
bigdeer2.addch(4, 17, ord('/'))
bigdeer2.addch(4, 18, ord('/'))
bigdeer2.addch(4, 19, ord('\\'))
bigdeer2.addch(4, 22, ord('\\'))
bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer2.addstr(6, 7, "(( )____( /") # ))
bigdeer2.addstr(7, 7, "( / |")
bigdeer2.addstr(8, 8, "\\/ |")
bigdeer2.addstr(9, 9, "|> |>")
bigdeer3.addch(0, 17, ord('\\'))
bigdeer3.addch(0, 18, ord('/'))
bigdeer3.addch(0, 19, ord('\\'))
bigdeer3.addch(0, 20, ord('/'))
bigdeer3.addch(1, 18, ord('\\'))
bigdeer3.addch(1, 20, ord('/'))
bigdeer3.addch(2, 19, ord('|'))
bigdeer3.addch(2, 20, ord('_'))
bigdeer3.addch(3, 18, ord('/'))
bigdeer3.addch(3, 19, ord('^'))
bigdeer3.addch(3, 20, ord('0'))
bigdeer3.addch(3, 21, ord('\\'))
bigdeer3.addch(4, 17, ord('/'))
bigdeer3.addch(4, 18, ord('/'))
bigdeer3.addch(4, 19, ord('\\'))
bigdeer3.addch(4, 22, ord('\\'))
bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer3.addstr(6, 6, "( ()_____( /") # ))
bigdeer3.addstr(7, 6, "/ / /")
bigdeer3.addstr(8, 5, "|/ \\")
bigdeer3.addstr(9, 5, "/> \\>")
bigdeer4.addch(0, 17, ord('\\'))
bigdeer4.addch(0, 18, ord('/'))
bigdeer4.addch(0, 19, ord('\\'))
bigdeer4.addch(0, 20, ord('/'))
bigdeer4.addch(1, 18, ord('\\'))
bigdeer4.addch(1, 20, ord('/'))
bigdeer4.addch(2, 19, ord('|'))
bigdeer4.addch(2, 20, ord('_'))
bigdeer4.addch(3, 18, ord('/'))
bigdeer4.addch(3, 19, ord('^'))
bigdeer4.addch(3, 20, ord('0'))
bigdeer4.addch(3, 21, ord('\\'))
bigdeer4.addch(4, 17, ord('/'))
bigdeer4.addch(4, 18, ord('/'))
bigdeer4.addch(4, 19, ord('\\'))
bigdeer4.addch(4, 22, ord('\\'))
bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
bigdeer4.addstr(6, 6, "( )______( /") # )
bigdeer4.addstr(7, 5, "(/ \\") # )
bigdeer4.addstr(8, 0, "v___= ----^")
lookdeer1.addstr(0, 16, "\\/ \\/")
lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
lookdeer1.addstr(2, 19, "\\=/")
lookdeer1.addstr(3, 17, "^\\o o/^")
lookdeer1.addstr(4, 17, "//( )")
lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer1.addstr(6, 7, "( \\_____( /") # ))
lookdeer1.addstr(7, 8, "( ) /")
lookdeer1.addstr(8, 9, "\\\\ /")
lookdeer1.addstr(9, 11, "\\>/>")
lookdeer2.addstr(0, 16, "\\/ \\/")
lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
lookdeer2.addstr(2, 19, "\\=/")
lookdeer2.addstr(3, 17, "^\\o o/^")
lookdeer2.addstr(4, 17, "//( )")
lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer2.addstr(6, 7, "(( )____( /") # ))
lookdeer2.addstr(7, 7, "( / |")
lookdeer2.addstr(8, 8, "\\/ |")
lookdeer2.addstr(9, 9, "|> |>")
lookdeer3.addstr(0, 16, "\\/ \\/")
lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
lookdeer3.addstr(2, 19, "\\=/")
lookdeer3.addstr(3, 17, "^\\o o/^")
lookdeer3.addstr(4, 17, "//( )")
lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer3.addstr(6, 6, "( ()_____( /") # ))
lookdeer3.addstr(7, 6, "/ / /")
lookdeer3.addstr(8, 5, "|/ \\")
lookdeer3.addstr(9, 5, "/> \\>")
lookdeer4.addstr(0, 16, "\\/ \\/")
lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
lookdeer4.addstr(2, 19, "\\=/")
lookdeer4.addstr(3, 17, "^\\o o/^")
lookdeer4.addstr(4, 17, "//( )")
lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
lookdeer4.addstr(6, 6, "( )______( /") # )
lookdeer4.addstr(7, 5, "(/ \\") # )
lookdeer4.addstr(8, 0, "v___= ----^")
###############################################
curses.cbreak()
stdscr.nodelay(1)
while 1:
stdscr.clear()
treescrn.erase()
w_del_msg.touchwin()
treescrn.touchwin()
treescrn2.erase()
treescrn2.touchwin()
treescrn8.erase()
treescrn8.touchwin()
stdscr.refresh()
look_out(150)
boxit()
stdscr.refresh()
look_out(150)
seas()
stdscr.refresh()
greet()
stdscr.refresh()
look_out(150)
fromwho()
stdscr.refresh()
look_out(150)
tree()
look_out(150)
balls()
look_out(150)
star()
look_out(150)
strng1()
strng2()
strng3()
strng4()
strng5()
# set up the windows for our blinking trees
#
# treescrn3
treescrn.overlay(treescrn3)
# balls
treescrn3.addch(4, 18, ord(' '))
treescrn3.addch(7, 6, ord(' '))
treescrn3.addch(8, 19, ord(' '))
treescrn3.addch(11, 22, ord(' '))
# star
treescrn3.addch(0, 12, ord('*'))
# strng1
treescrn3.addch(3, 11, ord(' '))
# strng2
treescrn3.addch(5, 13, ord(' '))
treescrn3.addch(6, 10, ord(' '))
# strng3
treescrn3.addch(7, 16, ord(' '))
treescrn3.addch(7, 14, ord(' '))
# strng4
treescrn3.addch(10, 13, ord(' '))
treescrn3.addch(10, 10, ord(' '))
treescrn3.addch(11, 8, ord(' '))
# strng5
treescrn3.addch(11, 18, ord(' '))
treescrn3.addch(12, 13, ord(' '))
# treescrn4
treescrn.overlay(treescrn4)
# balls
treescrn4.addch(3, 9, ord(' '))
treescrn4.addch(4, 16, ord(' '))
treescrn4.addch(7, 6, ord(' '))
treescrn4.addch(8, 19, ord(' '))
treescrn4.addch(11, 2, ord(' '))
treescrn4.addch(12, 23, ord(' '))
# star
treescrn4.standout()
treescrn4.addch(0, 12, ord('*'))
treescrn4.standend()
# strng1
treescrn4.addch(3, 13, ord(' '))
# strng2
# strng3
treescrn4.addch(7, 15, ord(' '))
treescrn4.addch(8, 11, ord(' '))
# strng4
treescrn4.addch(9, 16, ord(' '))
treescrn4.addch(10, 12, ord(' '))
treescrn4.addch(11, 8, ord(' '))
# strng5
treescrn4.addch(11, 18, ord(' '))
treescrn4.addch(12, 14, ord(' '))
# treescrn5
treescrn.overlay(treescrn5)
# balls
treescrn5.addch(3, 15, ord(' '))
treescrn5.addch(10, 20, ord(' '))
treescrn5.addch(12, 1, ord(' '))
# star
treescrn5.addch(0, 12, ord(' '))
# strng1
treescrn5.addch(3, 11, ord(' '))
# strng2
treescrn5.addch(5, 12, ord(' '))
# strng3
treescrn5.addch(7, 14, ord(' '))
treescrn5.addch(8, 10, ord(' '))
# strng4
treescrn5.addch(9, 15, ord(' '))
treescrn5.addch(10, 11, ord(' '))
treescrn5.addch(11, 7, ord(' '))
# strng5
treescrn5.addch(11, 17, ord(' '))
treescrn5.addch(12, 13, ord(' '))
# treescrn6
treescrn.overlay(treescrn6)
# balls
treescrn6.addch(6, 7, ord(' '))
treescrn6.addch(7, 18, ord(' '))
treescrn6.addch(10, 4, ord(' '))
treescrn6.addch(11, 23, ord(' '))
# star
treescrn6.standout()
treescrn6.addch(0, 12, ord('*'))
treescrn6.standend()
# strng1
# strng2
treescrn6.addch(5, 11, ord(' '))
# strng3
treescrn6.addch(7, 13, ord(' '))
treescrn6.addch(8, 9, ord(' '))
# strng4
treescrn6.addch(9, 14, ord(' '))
treescrn6.addch(10, 10, ord(' '))
treescrn6.addch(11, 6, ord(' '))
# strng5
treescrn6.addch(11, 16, ord(' '))
treescrn6.addch(12, 12, ord(' '))
# treescrn7
treescrn.overlay(treescrn7)
# balls
treescrn7.addch(3, 15, ord(' '))
treescrn7.addch(6, 7, ord(' '))
treescrn7.addch(7, 18, ord(' '))
treescrn7.addch(10, 4, ord(' '))
treescrn7.addch(11, 22, ord(' '))
# star
treescrn7.addch(0, 12, ord('*'))
# strng1
treescrn7.addch(3, 12, ord(' '))
# strng2
treescrn7.addch(5, 13, ord(' '))
treescrn7.addch(6, 9, ord(' '))
# strng3
treescrn7.addch(7, 15, ord(' '))
treescrn7.addch(8, 11, ord(' '))
# strng4
treescrn7.addch(9, 16, ord(' '))
treescrn7.addch(10, 12, ord(' '))
treescrn7.addch(11, 8, ord(' '))
# strng5
treescrn7.addch(11, 18, ord(' '))
treescrn7.addch(12, 14, ord(' '))
look_out(150)
reindeer()
w_holiday.touchwin()
w_holiday.refresh()
w_del_msg.refresh()
look_out(500)
for i in range(0, 20):
blinkit()
curses.wrapper(main)
| xbmc/atv2 | xbmc/lib/libPython/Python/Demo/curses/xmas.py | Python | gpl-2.0 | 25,498 |
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
ReflectionBlock can render and process ReflectionIdevices as XHTML
"""
import logging
from exe.webui.block import Block
from exe.webui import common
from exe.webui.element import TextAreaElement
log = logging.getLogger(__name__)
# ===========================================================================
class ReflectionBlock(Block):
"""
ReflectionBlock can render and process ReflectionIdevices as XHTML
"""
def __init__(self, parent, idevice):
"""
Initialize a new Block object
"""
Block.__init__(self, parent, idevice)
self.activityInstruc = idevice.activityInstruc
self.answerInstruc = idevice.answerInstruc
# to compensate for the strange unpickling timing when objects are
# loaded from an elp, ensure that proper idevices are set:
if idevice.activityTextArea.idevice is None:
idevice.activityTextArea.idevice = idevice
if idevice.answerTextArea.idevice is None:
idevice.answerTextArea.idevice = idevice
self.activityElement = TextAreaElement(idevice.activityTextArea)
self.answerElement = TextAreaElement(idevice.answerTextArea)
self.previewing = False # In view or preview render
if not hasattr(self.idevice,'undo'):
self.idevice.undo = True
def process(self, request):
"""
Process the request arguments from the web server
"""
Block.process(self, request)
is_cancel = common.requestHasCancel(request)
if not is_cancel:
self.activityElement.process(request)
self.answerElement.process(request)
if "title"+self.id in request.args:
self.idevice.title = request.args["title"+self.id][0]
def renderEdit(self, style):
"""
Returns an XHTML string with the form element for editing this block
"""
html = "<div class=\"iDevice\"><br/>\n"
html += common.textInput("title"+self.id, self.idevice.title)
html += self.activityElement.renderEdit()
html += self.answerElement.renderEdit()
html += "<br/>" + self.renderEditButtons()
html += "</div>\n"
return html
def renderPreview(self, style):
"""
Remembers if we're previewing or not,
then implicitly calls self.renderViewContent (via Block.renderPreview)
"""
self.previewing = True
return Block.renderPreview(self, style)
def renderView(self, style):
"""
        Remembers if we're previewing or not,
        then implicitly calls self.renderViewContent (via Block.renderView)
"""
self.previewing = False
return Block.renderView(self, style)
def renderViewContent(self):
"""
Returns an XHTML string for this block
"""
if self.previewing:
html = self.activityElement.renderPreview()
feedback = self.answerElement.renderPreview()
else:
html = self.activityElement.renderView()
feedback = self.answerElement.renderView()
#added lernmodule.net
html += '<div class="Reflection" id="Reflection%s">' % (self.id)
html += '<textarea id="ReflectionText%s" class="ReflectionText" name="ReflectionText%s"' % (self.id, self.id)
        html += ' rows="5" style="width:99%"></textarea></div>'
        html += common.feedbackBlock(self.id, feedback)
return html
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.webui.blockfactory import g_blockFactory
g_blockFactory.registerBlockType(ReflectionBlock, ReflectionIdevice)
# ===========================================================================
| tquilian/exeNext | exe/webui/reflectionblock.py | Python | gpl-2.0 | 4,875 |
"""Tests for CMFNotification installation ad uninstallation.
$Id: testInstallation.py 65679 2008-05-25 23:45:26Z dbaty $
"""
from zope.component import getUtility
from zope.component import getMultiAdapter
from AccessControl.PermissionRole import rolesForPermissionOn
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignmentMapping
from Products.CMFCore.utils import getToolByName
from Products.CMFNotification.config import LAYER_NAME
from Products.CMFNotification.config import PORTLET_NAME
from Products.CMFNotification.NotificationTool import ID as TOOL_ID
from Products.CMFNotification.permissions import SUBSCRIBE_PERMISSION
from Products.CMFNotification.tests.plonetestbrowser import Browser
from Products.CMFNotification.tests.base import CMFNotificationTestCase
class TestInstallation(CMFNotificationTestCase):
"""Make sure that the product is properly installed."""
def afterSetUp(self):
pass
def testToolIsThere(self):
portal = self.portal
tool = getToolByName(self.portal, TOOL_ID)
self.failUnless(tool is not None)
def testSkinLayerIsThere(self):
stool = getToolByName(self.portal, 'portal_skins')
for skin, layers in stool._getSelections().items():
layers = layers.split(',')
self.failUnless(LAYER_NAME in layers)
self.failUnless(LAYER_NAME in stool.objectIds())
def testPortletCanBeAdded(self):
base_url = self.portal.absolute_url()
for name in ('plone.leftcolumn', 'plone.rightcolumn'):
manager = getUtility(IPortletManager,
name=name,
context=self.portal)
titles = [p.title for p in manager.getAddablePortletTypes()]
self.failUnless(PORTLET_NAME in titles)
manager = getUtility(IPortletManager,
name='plone.rightcolumn',
context=self.portal)
right_portlets = getMultiAdapter((self.portal, manager),
IPortletAssignmentMapping,
context=self.portal)
right_portlets = right_portlets.keys()
self.failUnless(PORTLET_NAME in right_portlets)
def testPermissionHasBeenSet(self):
roles = set(rolesForPermissionOn(SUBSCRIBE_PERMISSION, self.portal))
self.failUnlessEqual(roles, set(('Manager', 'Member')))
def testConfigletHasBeenAdded(self):
cptool = getToolByName(self.portal, 'portal_controlpanel')
configlets = [c.getId() for c in cptool.listActions()]
self.failUnless('cmfnotification_configuration' in configlets)
class TestUnInstallation(CMFNotificationTestCase):
"""Test that the product has been properly uninstalled."""
def afterSetUp(self):
"""Uninstall the product before running each test."""
qtool = getToolByName(self.portal, 'portal_quickinstaller')
self.setRoles(['Manager'])
qtool.uninstallProducts(['CMFNotification'])
def testToolIsNotThere(self):
tool = getToolByName(self.portal, TOOL_ID, None)
self.failUnless(tool is None)
def testSkinLayerIsNotThere(self):
stool = getToolByName(self.portal, 'portal_skins')
for skin, layers in stool._getSelections().items():
layers = layers.split(',')
            self.failUnless(LAYER_NAME not in layers)
self.failUnless(LAYER_NAME not in stool.objectIds())
def testPortletDoNoExist(self):
base_url = self.portal.absolute_url()
for name in ('plone.leftcolumn', 'plone.rightcolumn'):
manager = getUtility(IPortletManager,
name=name,
context=self.portal)
titles = [p.title for p in manager.getAddablePortletTypes()]
self.failUnless(PORTLET_NAME not in titles)
manager = getUtility(IPortletManager,
name='plone.rightcolumn',
context=self.portal)
right_portlets = getMultiAdapter((self.portal, manager),
IPortletAssignmentMapping,
context=self.portal)
right_portlets = right_portlets.keys()
self.failUnless(PORTLET_NAME not in right_portlets)
def testConfigletDoNotExist(self):
cptool = getToolByName(self.portal, 'portal_controlpanel')
configlets = [c.getId() for c in cptool.listActions()]
self.failUnless('cmfnotification_configuration' not in configlets)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestInstallation))
suite.addTest(makeSuite(TestUnInstallation))
return suite
| cynapse/cynin | products/CMFNotification/tests/testInstallation.py | Python | gpl-3.0 | 4,873 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.orchestration.v1 import resource
FAKE_ID = '32e39358-2422-4ad0-a1b5-dd60696bf564'
FAKE_NAME = 'test_stack'
FAKE = {
'links': [{
'href': 'http://res_link',
'rel': 'self'
}, {
'href': 'http://stack_link',
'rel': 'stack'
}],
'logical_resource_id': 'the_resource',
'name': 'the_resource',
'physical_resource_id': '9f38ab5a-37c8-4e40-9702-ce27fc5f6954',
'required_by': [],
'resource_type': 'OS::Heat::FakeResource',
'status': 'CREATE_COMPLETE',
'status_reason': 'state changed',
'updated_time': '2015-03-09T12:15:57.233772',
}
class TestResource(base.TestCase):
def test_basic(self):
sot = resource.Resource()
self.assertEqual('resource', sot.resource_key)
self.assertEqual('resources', sot.resources_key)
self.assertEqual('/stacks/%(stack_name)s/%(stack_id)s/resources',
sot.base_path)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_retrieve)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = resource.Resource(**FAKE)
self.assertEqual(FAKE['links'], sot.links)
self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id)
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['physical_resource_id'],
sot.physical_resource_id)
self.assertEqual(FAKE['required_by'], sot.required_by)
self.assertEqual(FAKE['resource_type'], sot.resource_type)
self.assertEqual(FAKE['status'], sot.status)
self.assertEqual(FAKE['status_reason'], sot.status_reason)
self.assertEqual(FAKE['updated_time'], sot.updated_at)
| ctrlaltdel/neutrinator | vendor/openstack/tests/unit/orchestration/v1/test_resource.py | Python | gpl-3.0 | 2,405 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from storageadmin.models import Appliance
class UpdateSubscription(models.Model):
"""name of the channel. eg: stable"""
name = models.CharField(max_length=64, unique=True)
"""detailed description or a longer name"""
description = models.CharField(max_length=128)
"""url of the repo"""
url = models.CharField(max_length=512)
appliance = models.ForeignKey(Appliance)
password = models.CharField(max_length=64, null=True)
"""status of subscription: active, inactive, expired etc.."""
status = models.CharField(max_length=64)
class Meta:
app_label = 'storageadmin'
| nkhare/rockstor-core | src/rockstor/storageadmin/models/update_subscription.py | Python | gpl-3.0 | 1,351 |
"""
Tests for waffle utils features.
"""
import crum
import ddt
from django.test import TestCase
from django.test.client import RequestFactory
from edx_django_utils.cache import RequestCache
from mock import patch
from opaque_keys.edx.keys import CourseKey
from waffle.testutils import override_flag
from .. import CourseWaffleFlag, WaffleFlagNamespace, WaffleSwitchNamespace, WaffleSwitch
from ..models import WaffleFlagCourseOverrideModel
@ddt.ddt
class TestCourseWaffleFlag(TestCase):
"""
Tests the CourseWaffleFlag.
"""
NAMESPACE_NAME = "test_namespace"
FLAG_NAME = "test_flag"
NAMESPACED_FLAG_NAME = NAMESPACE_NAME + "." + FLAG_NAME
TEST_COURSE_KEY = CourseKey.from_string("edX/DemoX/Demo_Course")
TEST_COURSE_2_KEY = CourseKey.from_string("edX/DemoX/Demo_Course_2")
TEST_NAMESPACE = WaffleFlagNamespace(NAMESPACE_NAME)
TEST_COURSE_FLAG = CourseWaffleFlag(TEST_NAMESPACE, FLAG_NAME)
def setUp(self):
super(TestCourseWaffleFlag, self).setUp()
request = RequestFactory().request()
self.addCleanup(crum.set_current_request, None)
crum.set_current_request(request)
RequestCache.clear_all_namespaces()
@ddt.data(
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on, 'waffle_enabled': False, 'result': True},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.off, 'waffle_enabled': True, 'result': False},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': True, 'result': True},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': False, 'result': False},
)
def test_course_waffle_flag(self, data):
"""
Tests various combinations of a flag being set in waffle and overridden
for a course.
"""
with patch.object(WaffleFlagCourseOverrideModel, 'override_value', return_value=data['course_override']):
with override_flag(self.NAMESPACED_FLAG_NAME, active=data['waffle_enabled']):
# check twice to test that the result is properly cached
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
# result is cached, so override check should happen once
WaffleFlagCourseOverrideModel.override_value.assert_called_once_with(
self.NAMESPACED_FLAG_NAME,
self.TEST_COURSE_KEY
)
# check flag for a second course
if data['course_override'] == WaffleFlagCourseOverrideModel.ALL_CHOICES.unset:
# When course override wasn't set for the first course, the second course will get the same
# cached value from waffle.
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_2_KEY), data['waffle_enabled'])
else:
# When course override was set for the first course, it should not apply to the second
# course which should get the default value of False.
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_2_KEY), False)
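    # Summary of the precedence exercised above (not part of the original tests):
    # an explicit per-course override (on/off) wins over the global waffle flag;
    # when the override is unset, the global waffle value applies and is cached.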
@ddt.data(
{'flag_undefined_default': None, 'result': False},
{'flag_undefined_default': False, 'result': False},
{'flag_undefined_default': True, 'result': True},
)
def test_undefined_waffle_flag(self, data):
"""
Test flag with various defaults provided for undefined waffle flags.
"""
test_course_flag = CourseWaffleFlag(
self.TEST_NAMESPACE,
self.FLAG_NAME,
flag_undefined_default=data['flag_undefined_default']
)
with patch.object(
WaffleFlagCourseOverrideModel,
'override_value',
return_value=WaffleFlagCourseOverrideModel.ALL_CHOICES.unset
):
# check twice to test that the result is properly cached
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
# result is cached, so override check should happen once
WaffleFlagCourseOverrideModel.override_value.assert_called_once_with(
self.NAMESPACED_FLAG_NAME,
self.TEST_COURSE_KEY
)
@ddt.data(
{'flag_undefined_default': None, 'result': False},
{'flag_undefined_default': False, 'result': False},
{'flag_undefined_default': True, 'result': True},
)
def test_without_request(self, data):
"""
Test the flag behavior when outside a request context.
"""
crum.set_current_request(None)
test_course_flag = CourseWaffleFlag(
self.TEST_NAMESPACE,
self.FLAG_NAME,
flag_undefined_default=data['flag_undefined_default']
)
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
class TestWaffleSwitch(TestCase):
"""
Tests the WaffleSwitch.
"""
NAMESPACE_NAME = "test_namespace"
WAFFLE_SWITCH_NAME = "test_switch_name"
TEST_NAMESPACE = WaffleSwitchNamespace(NAMESPACE_NAME)
WAFFLE_SWITCH = WaffleSwitch(TEST_NAMESPACE, WAFFLE_SWITCH_NAME)
def test_namespaced_switch_name(self):
"""
Verify namespaced_switch_name returns the correct namespace switch name
"""
expected = self.NAMESPACE_NAME + "." + self.WAFFLE_SWITCH_NAME
actual = self.WAFFLE_SWITCH.namespaced_switch_name
self.assertEqual(actual, expected)
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/waffle_utils/tests/test_init.py | Python | agpl-3.0 | 5,739 |
# -*- coding: utf-8 -*-
from datetime import datetime
import uuid
from werkzeug.exceptions import Forbidden
import logging
import openerp
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.exceptions import Warning
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class KarmaError(Forbidden):
""" Karma-related error, used for forum and posts. """
pass
class Forum(osv.Model):
"""TDE TODO: set karma values for actions dynamic for a given forum"""
_name = 'forum.forum'
_description = 'Forums'
_inherit = ['mail.thread', 'website.seo.metadata']
def init(self, cr):
""" Add forum uuid for user email validation. """
forum_uuids = self.pool['ir.config_parameter'].search(cr, SUPERUSER_ID, [('key', '=', 'website_forum.uuid')])
if not forum_uuids:
self.pool['ir.config_parameter'].set_param(cr, SUPERUSER_ID, 'website_forum.uuid', str(uuid.uuid4()), ['base.group_system'])
_columns = {
'name': fields.char('Name', required=True, translate=True),
'faq': fields.html('Guidelines'),
'description': fields.html('Description'),
# karma generation
'karma_gen_question_new': fields.integer('Asking a question'),
'karma_gen_question_upvote': fields.integer('Question upvoted'),
'karma_gen_question_downvote': fields.integer('Question downvoted'),
'karma_gen_answer_upvote': fields.integer('Answer upvoted'),
'karma_gen_answer_downvote': fields.integer('Answer downvoted'),
'karma_gen_answer_accept': fields.integer('Accepting an answer'),
'karma_gen_answer_accepted': fields.integer('Answer accepted'),
'karma_gen_answer_flagged': fields.integer('Answer flagged'),
# karma-based actions
'karma_ask': fields.integer('Ask a question'),
'karma_answer': fields.integer('Answer a question'),
'karma_edit_own': fields.integer('Edit its own posts'),
'karma_edit_all': fields.integer('Edit all posts'),
'karma_close_own': fields.integer('Close its own posts'),
'karma_close_all': fields.integer('Close all posts'),
'karma_unlink_own': fields.integer('Delete its own posts'),
'karma_unlink_all': fields.integer('Delete all posts'),
'karma_upvote': fields.integer('Upvote'),
'karma_downvote': fields.integer('Downvote'),
'karma_answer_accept_own': fields.integer('Accept an answer on its own questions'),
'karma_answer_accept_all': fields.integer('Accept an answer to all questions'),
'karma_editor_link_files': fields.integer('Linking files (Editor)'),
'karma_editor_clickable_link': fields.integer('Clickable links (Editor)'),
'karma_comment_own': fields.integer('Comment its own posts'),
'karma_comment_all': fields.integer('Comment all posts'),
'karma_comment_convert_own': fields.integer('Convert its own answers to comments and vice versa'),
'karma_comment_convert_all': fields.integer('Convert all answers to comments and vice versa'),
'karma_comment_unlink_own': fields.integer('Unlink its own comments'),
'karma_comment_unlink_all': fields.integer('Unlink all comments'),
'karma_retag': fields.integer('Change question tags'),
'karma_flag': fields.integer('Flag a post as offensive'),
}
def _get_default_faq(self, cr, uid, context=None):
fname = openerp.modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
with open(fname, 'r') as f:
return f.read()
_defaults = {
'description': 'This community is for professionals and enthusiasts of our products and services.',
'faq': _get_default_faq,
'karma_gen_question_new': 0, # set to null for anti spam protection
'karma_gen_question_upvote': 5,
'karma_gen_question_downvote': -2,
'karma_gen_answer_upvote': 10,
'karma_gen_answer_downvote': -2,
'karma_gen_answer_accept': 2,
'karma_gen_answer_accepted': 15,
'karma_gen_answer_flagged': -100,
'karma_ask': 3, # set to not null for anti spam protection
'karma_answer': 3, # set to not null for anti spam protection
'karma_edit_own': 1,
'karma_edit_all': 300,
'karma_close_own': 100,
'karma_close_all': 500,
'karma_unlink_own': 500,
'karma_unlink_all': 1000,
'karma_upvote': 5,
'karma_downvote': 50,
'karma_answer_accept_own': 20,
'karma_answer_accept_all': 500,
'karma_editor_link_files': 20,
'karma_editor_clickable_link': 20,
'karma_comment_own': 3,
'karma_comment_all': 5,
'karma_comment_convert_own': 50,
'karma_comment_convert_all': 500,
'karma_comment_unlink_own': 50,
'karma_comment_unlink_all': 500,
'karma_retag': 75,
'karma_flag': 500,
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
return super(Forum, self).create(cr, uid, values, context=create_context)
class Post(osv.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC, write_date DESC"
def _get_user_vote(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
vote_ids = self.pool['forum.post.vote'].search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
for vote in self.pool['forum.post.vote'].browse(cr, uid, vote_ids, context=context):
res[vote.post_id.id] = vote.vote
return res
def _get_vote_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
for vote in post.vote_ids:
res[post.id] += int(vote.vote)
return res
def _get_post_from_vote(self, cr, uid, ids, context=None):
result = {}
for vote in self.pool['forum.post.vote'].browse(cr, uid, ids, context=context):
result[vote.post_id.id] = True
return result.keys()
def _get_user_favourite(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
if uid in [f.id for f in post.favourite_ids]:
res[post.id] = True
return res
def _get_favorite_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] += len(post.favourite_ids)
return res
def _get_post_from_hierarchy(self, cr, uid, ids, context=None):
post_ids = set(ids)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
if post.parent_id:
post_ids.add(post.parent_id.id)
return list(post_ids)
def _get_child_count(self, cr, uid, ids, field_name=False, arg={}, context=None):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
if post.parent_id:
res[post.parent_id.id] = len(post.parent_id.child_ids)
else:
res[post.id] = len(post.child_ids)
return res
def _get_uid_answered(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = any(answer.create_uid.id == uid for answer in post.child_ids)
return res
def _get_has_validated_answer(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
ans_ids = self.search(cr, uid, [('parent_id', 'in', ids), ('is_correct', '=', True)], context=context)
for answer in self.browse(cr, uid, ans_ids, context=context):
res[answer.parent_id.id] = True
return res
def _is_self_reply(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = post.parent_id and post.parent_id.create_uid == post.create_uid or False
return res
def _get_post_karma_rights(self, cr, uid, ids, field_name, arg, context=None):
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = {
'karma_ask': post.forum_id.karma_ask,
'karma_answer': post.forum_id.karma_answer,
'karma_accept': post.parent_id and post.parent_id.create_uid.id == uid and post.forum_id.karma_answer_accept_own or post.forum_id.karma_answer_accept_all,
'karma_edit': post.create_uid.id == uid and post.forum_id.karma_edit_own or post.forum_id.karma_edit_all,
'karma_close': post.create_uid.id == uid and post.forum_id.karma_close_own or post.forum_id.karma_close_all,
'karma_unlink': post.create_uid.id == uid and post.forum_id.karma_unlink_own or post.forum_id.karma_unlink_all,
'karma_upvote': post.forum_id.karma_upvote,
'karma_downvote': post.forum_id.karma_downvote,
'karma_comment': post.create_uid.id == uid and post.forum_id.karma_comment_own or post.forum_id.karma_comment_all,
'karma_comment_convert': post.create_uid.id == uid and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all,
}
res[post.id].update({
'can_ask': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_ask'],
'can_answer': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_answer'],
'can_accept': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_accept'],
'can_edit': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_edit'],
'can_close': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_close'],
'can_unlink': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_unlink'],
'can_upvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_upvote'],
'can_downvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_downvote'],
'can_comment': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment'],
'can_comment_convert': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment_convert'],
})
return res
_columns = {
'name': fields.char('Title'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'content': fields.html('Content'),
'tag_ids': fields.many2many('forum.tag', 'forum_tag_rel', 'forum_id', 'forum_tag_id', 'Tags'),
'state': fields.selection([('active', 'Active'), ('close', 'Close'), ('offensive', 'Offensive')], 'Status'),
'views': fields.integer('Number of Views'),
'active': fields.boolean('Active'),
'is_correct': fields.boolean('Valid Answer', help='Correct Answer or Answer on this question accepted.'),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', 'in', ['email', 'comment'])
],
string='Post Messages', help="Comments on forum post",
),
# history
'create_date': fields.datetime('Asked on', select=True, readonly=True),
'create_uid': fields.many2one('res.users', 'Created by', select=True, readonly=True),
'write_date': fields.datetime('Update on', select=True, readonly=True),
'write_uid': fields.many2one('res.users', 'Updated by', select=True, readonly=True),
# vote fields
'vote_ids': fields.one2many('forum.post.vote', 'post_id', 'Votes'),
'user_vote': fields.function(_get_user_vote, string='My Vote', type='integer'),
'vote_count': fields.function(
_get_vote_count, string="Votes", type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['vote_ids'], 10),
'forum.post.vote': (_get_post_from_vote, [], 10),
}),
# favorite fields
'favourite_ids': fields.many2many('res.users', string='Favourite'),
'user_favourite': fields.function(_get_user_favourite, string="My Favourite", type='boolean'),
'favourite_count': fields.function(
_get_favorite_count, string='Favorite Count', type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['favourite_ids'], 10),
}),
# hierarchy
'parent_id': fields.many2one('forum.post', 'Question', ondelete='cascade'),
'self_reply': fields.function(
_is_self_reply, 'Reply to own question', type='boolean',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['parent_id', 'create_uid'], 10),
}),
'child_ids': fields.one2many('forum.post', 'parent_id', 'Answers'),
'child_count': fields.function(
_get_child_count, string="Answers", type='integer',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids'], 10),
}),
'uid_has_answered': fields.function(
_get_uid_answered, string='Has Answered', type='boolean',
),
'has_validated_answer': fields.function(
_get_has_validated_answer, string='Has a Validated Answered', type='boolean',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids', 'is_correct'], 10),
}
),
# closing
'closed_reason_id': fields.many2one('forum.post.reason', 'Reason'),
'closed_uid': fields.many2one('res.users', 'Closed by', select=1),
'closed_date': fields.datetime('Closed on', readonly=True),
# karma
'karma_ask': fields.function(_get_post_karma_rights, string='Karma to ask', type='integer', multi='_get_post_karma_rights'),
'karma_answer': fields.function(_get_post_karma_rights, string='Karma to answer', type='integer', multi='_get_post_karma_rights'),
'karma_accept': fields.function(_get_post_karma_rights, string='Karma to accept this answer', type='integer', multi='_get_post_karma_rights'),
'karma_edit': fields.function(_get_post_karma_rights, string='Karma to edit', type='integer', multi='_get_post_karma_rights'),
'karma_close': fields.function(_get_post_karma_rights, string='Karma to close', type='integer', multi='_get_post_karma_rights'),
'karma_unlink': fields.function(_get_post_karma_rights, string='Karma to unlink', type='integer', multi='_get_post_karma_rights'),
'karma_upvote': fields.function(_get_post_karma_rights, string='Karma to upvote', type='integer', multi='_get_post_karma_rights'),
'karma_downvote': fields.function(_get_post_karma_rights, string='Karma to downvote', type='integer', multi='_get_post_karma_rights'),
'karma_comment': fields.function(_get_post_karma_rights, string='Karma to comment', type='integer', multi='_get_post_karma_rights'),
'karma_comment_convert': fields.function(_get_post_karma_rights, string='karma to convert as a comment', type='integer', multi='_get_post_karma_rights'),
# access rights
'can_ask': fields.function(_get_post_karma_rights, string='Can Ask', type='boolean', multi='_get_post_karma_rights'),
'can_answer': fields.function(_get_post_karma_rights, string='Can Answer', type='boolean', multi='_get_post_karma_rights'),
'can_accept': fields.function(_get_post_karma_rights, string='Can Accept', type='boolean', multi='_get_post_karma_rights'),
'can_edit': fields.function(_get_post_karma_rights, string='Can Edit', type='boolean', multi='_get_post_karma_rights'),
'can_close': fields.function(_get_post_karma_rights, string='Can Close', type='boolean', multi='_get_post_karma_rights'),
'can_unlink': fields.function(_get_post_karma_rights, string='Can Unlink', type='boolean', multi='_get_post_karma_rights'),
'can_upvote': fields.function(_get_post_karma_rights, string='Can Upvote', type='boolean', multi='_get_post_karma_rights'),
'can_downvote': fields.function(_get_post_karma_rights, string='Can Downvote', type='boolean', multi='_get_post_karma_rights'),
'can_comment': fields.function(_get_post_karma_rights, string='Can Comment', type='boolean', multi='_get_post_karma_rights'),
'can_comment_convert': fields.function(_get_post_karma_rights, string='Can Convert to Comment', type='boolean', multi='_get_post_karma_rights'),
}
_defaults = {
'state': 'active',
'views': 0,
'active': True,
'vote_ids': list(),
'favourite_ids': list(),
'child_ids': list(),
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
post_id = super(Post, self).create(cr, uid, vals, context=create_context)
post = self.browse(cr, uid, post_id, context=context)
# karma-based access
if not post.parent_id and not post.can_ask:
raise KarmaError('Not enough karma to create a new question')
elif post.parent_id and not post.can_answer:
raise KarmaError('Not enough karma to answer to a question')
# messaging and chatter
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
if post.parent_id:
body = _(
'<p>A new answer for <i>%s</i> has been posted. <a href="%s/forum/%s/question/%s">Click here to access the post.</a></p>' %
(post.parent_id.name, base_url, slug(post.parent_id.forum_id), slug(post.parent_id))
)
self.message_post(cr, uid, post.parent_id.id, subject=_('Re: %s') % post.parent_id.name, body=body, subtype='website_forum.mt_answer_new', context=context)
else:
body = _(
'<p>A new question <i>%s</i> has been asked on %s. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
self.message_post(cr, uid, post_id, subject=post.name, body=body, subtype='website_forum.mt_question_new', context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_question_new, context=context)
return post_id
def write(self, cr, uid, ids, vals, context=None):
posts = self.browse(cr, uid, ids, context=context)
if 'state' in vals:
if vals['state'] in ['active', 'close'] and any(not post.can_close for post in posts):
raise KarmaError('Not enough karma to close or reopen a post.')
if 'active' in vals:
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to delete or reactivate a post')
if 'is_correct' in vals:
if any(not post.can_accept for post in posts):
raise KarmaError('Not enough karma to accept or refuse an answer')
# update karma except for self-acceptance
mult = 1 if vals['is_correct'] else -1
for post in self.browse(cr, uid, ids, context=context):
if vals['is_correct'] != post.is_correct and post.create_uid.id != uid:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * mult, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * mult, context=context)
if any(key not in ['state', 'active', 'is_correct', 'closed_uid', 'closed_date', 'closed_reason_id'] for key in vals.keys()) and any(not post.can_edit for post in posts):
raise KarmaError('Not enough karma to edit a post.')
res = super(Post, self).write(cr, uid, ids, vals, context=context)
# if post content modify, notify followers
if 'content' in vals or 'name' in vals:
for post in posts:
if post.parent_id:
body, subtype = _('Answer Edited'), 'website_forum.mt_answer_edit'
obj_id = post.parent_id.id
else:
body, subtype = _('Question Edited'), 'website_forum.mt_question_edit'
obj_id = post.id
self.message_post(cr, uid, obj_id, body=body, subtype=subtype, context=context)
return res
def reopen(self, cr, uid, ids, context=None):
if any(post.parent_id or post.state != 'close'
for post in self.browse(cr, uid, ids, context=context)):
return False
reason_offensive = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_7')
reason_spam = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_8')
for post in self.browse(cr, uid, ids, context=context):
if post.closed_reason_id.id in (reason_offensive, reason_spam):
_logger.info('Upvoting user <%s>, reopening spam/offensive question',
post.create_uid)
# TODO: in master, consider making this a tunable karma parameter
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id],
post.forum_id.karma_gen_question_downvote * -5,
context=context)
self.pool['forum.post'].write(cr, SUPERUSER_ID, ids, {'state': 'active'}, context=context)
def close(self, cr, uid, ids, reason_id, context=None):
if any(post.parent_id for post in self.browse(cr, uid, ids, context=context)):
return False
reason_offensive = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_7')
reason_spam = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_8')
if reason_id in (reason_offensive, reason_spam):
for post in self.browse(cr, uid, ids, context=context):
_logger.info('Downvoting user <%s> for posting spam/offensive contents',
post.create_uid)
# TODO: in master, consider making this a tunable karma parameter
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id],
post.forum_id.karma_gen_question_downvote * 5,
context=context)
self.pool['forum.post'].write(cr, uid, ids, {
'state': 'close',
'closed_uid': uid,
'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
'closed_reason_id': reason_id,
}, context=context)
def unlink(self, cr, uid, ids, context=None):
posts = self.browse(cr, uid, ids, context=context)
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to unlink a post')
# if unlinking an answer with accepted answer: remove provided karma
for post in posts:
if post.is_correct:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * -1, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * -1, context=context)
return super(Post, self).unlink(cr, uid, ids, context=context)
def vote(self, cr, uid, ids, upvote=True, context=None):
Vote = self.pool['forum.post.vote']
vote_ids = Vote.search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
new_vote = '1' if upvote else '-1'
voted_forum_ids = set()
if vote_ids:
for vote in Vote.browse(cr, uid, vote_ids, context=context):
if upvote:
new_vote = '0' if vote.vote == '-1' else '1'
else:
new_vote = '0' if vote.vote == '1' else '-1'
                Vote.write(cr, uid, [vote.id], {'vote': new_vote}, context=context)
voted_forum_ids.add(vote.post_id.id)
        for post_id in set(ids) - voted_forum_ids:
            Vote.create(cr, uid, {'post_id': post_id, 'vote': new_vote}, context=context)
return {'vote_count': self._get_vote_count(cr, uid, ids, None, None, context=context)[ids[0]], 'user_vote': new_vote}
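    # Toggle semantics implied by vote() above (illustrative, not original code):
    # an upvote on a post the user currently downvotes only cancels it (stores
    # '0'); a second upvote then stores '1'. Downvotes mirror this behaviour.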
def convert_answer_to_comment(self, cr, uid, id, context=None):
""" Tools to convert an answer (forum.post) to a comment (mail.message).
The original post is unlinked and a new comment is posted on the question
using the post create_uid as the comment's author. """
post = self.browse(cr, SUPERUSER_ID, id, context=context)
if not post.parent_id:
return False
# karma-based action check: use the post field that computed own/all value
if not post.can_comment_convert:
raise KarmaError('Not enough karma to convert an answer to a comment')
# post the message
question = post.parent_id
values = {
'author_id': post.create_uid.partner_id.id,
'body': html2plaintext(post.content),
'type': 'comment',
'subtype': 'mail.mt_comment',
'date': post.create_date,
}
message_id = self.pool['forum.post'].message_post(
cr, uid, question.id,
            context=dict(context, mail_create_nosubscribe=True),
**values)
# unlink the original answer, using SUPERUSER_ID to avoid karma issues
self.pool['forum.post'].unlink(cr, SUPERUSER_ID, [post.id], context=context)
return message_id
def convert_comment_to_answer(self, cr, uid, message_id, default=None, context=None):
""" Tool to convert a comment (mail.message) into an answer (forum.post).
The original comment is unlinked and a new answer from the comment's author
is created. Nothing is done if the comment's author already answered the
question. """
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, comment.res_id, context=context)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not comment.author_id or not comment.author_id.user_ids: # only comment posted by users can be converted
return False
# karma-based action check: must check the message's author to know if own / all
karma_convert = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all
can_convert = uid == SUPERUSER_ID or user.karma >= karma_convert
if not can_convert:
raise KarmaError('Not enough karma to convert a comment to an answer')
# check the message's author has not already an answer
question = post.parent_id if post.parent_id else post
post_create_uid = comment.author_id.user_ids[0]
if any(answer.create_uid.id == post_create_uid.id for answer in question.child_ids):
return False
# create the new post
post_values = {
'forum_id': question.forum_id.id,
'content': comment.body,
'parent_id': question.id,
}
# done with the author user to have create_uid correctly set
new_post_id = self.pool['forum.post'].create(cr, post_create_uid.id, post_values, context=context)
# delete comment
self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [comment.id], context=context)
return new_post_id
def unlink_comment(self, cr, uid, id, message_id, context=None):
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, id, context=context)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if not comment.model == 'forum.post' or not comment.res_id == id:
return False
# karma-based action check: must check the message's author to know if own or all
karma_unlink = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_unlink_own or post.forum_id.karma_comment_unlink_all
can_unlink = uid == SUPERUSER_ID or user.karma >= karma_unlink
if not can_unlink:
raise KarmaError('Not enough karma to unlink a comment')
return self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [message_id], context=context)
def set_viewed(self, cr, uid, ids, context=None):
cr.execute("""UPDATE forum_post SET views = views+1 WHERE id IN %s""", (tuple(ids),))
return True
def _get_access_link(self, cr, uid, mail, partner, context=None):
post = self.pool['forum.post'].browse(cr, uid, mail.res_id, context=context)
res_id = post.parent_id and "%s#answer-%s" % (post.parent_id.id, post.id) or post.id
return "/forum/%s/question/%s" % (post.forum_id.id, res_id)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, type='notification', subtype=None, context=None, **kwargs):
if thread_id and type == 'comment': # user comments have a restriction on karma
if isinstance(thread_id, (list, tuple)):
post_id = thread_id[0]
else:
post_id = thread_id
post = self.browse(cr, uid, post_id, context=context)
if not post.can_comment:
raise KarmaError('Not enough karma to comment')
return super(Post, self).message_post(cr, uid, thread_id, type=type, subtype=subtype, context=context, **kwargs)
class PostReason(osv.Model):
_name = "forum.post.reason"
_description = "Post Closing Reason"
_order = 'name'
_columns = {
'name': fields.char('Post Reason', required=True, translate=True),
}
class Vote(osv.Model):
_name = 'forum.post.vote'
_description = 'Vote'
_columns = {
'post_id': fields.many2one('forum.post', 'Post', ondelete='cascade', required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'vote': fields.selection([('1', '1'), ('-1', '-1'), ('0', '0')], 'Vote', required=True),
'create_date': fields.datetime('Create Date', select=True, readonly=True),
# TODO master: store these two
'forum_id': fields.related('post_id', 'forum_id', type='many2one', relation='forum.forum', string='Forum'),
'recipient_id': fields.related('post_id', 'create_uid', type='many2one', relation='res.users', string='To', help="The user receiving the vote"),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'vote': lambda *args: '1',
}
def _get_karma_value(self, old_vote, new_vote, up_karma, down_karma):
_karma_upd = {
'-1': {'-1': 0, '0': -1 * down_karma, '1': -1 * down_karma + up_karma},
'0': {'-1': 1 * down_karma, '0': 0, '1': up_karma},
'1': {'-1': -1 * up_karma + down_karma, '0': -1 * up_karma, '1': 0}
}
return _karma_upd[old_vote][new_vote]
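    # Worked example of the transition table above (illustrative numbers): with
    # up_karma=10 and down_karma=-2, flipping a vote from '-1' to '1' yields
    # -1 * down_karma + up_karma = 2 + 10 = 12 karma for the post's author.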
def create(self, cr, uid, vals, context=None):
vote_id = super(Vote, self).create(cr, uid, vals, context=context)
vote = self.browse(cr, uid, vote_id, context=context)
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise Warning('Not allowed to vote for its own post')
# karma check
if vote.vote == '1' and not vote.post_id.can_upvote:
raise KarmaError('Not enough karma to upvote.')
elif vote.vote == '-1' and not vote.post_id.can_downvote:
raise KarmaError('Not enough karma to downvote.')
# karma update
if vote.post_id.parent_id:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
return vote_id
def write(self, cr, uid, ids, values, context=None):
if 'vote' in values:
for vote in self.browse(cr, uid, ids, context=context):
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise Warning('Not allowed to vote for its own post')
# karma check
if (values['vote'] == '1' or vote.vote == '-1' and values['vote'] == '0') and not vote.post_id.can_upvote:
raise KarmaError('Not enough karma to upvote.')
elif (values['vote'] == '-1' or vote.vote == '1' and values['vote'] == '0') and not vote.post_id.can_downvote:
raise KarmaError('Not enough karma to downvote.')
# karma update
if vote.post_id.parent_id:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
res = super(Vote, self).write(cr, uid, ids, values, context=context)
return res
class Tags(osv.Model):
_name = "forum.tag"
_description = "Tag"
_inherit = ['website.seo.metadata']
def _get_posts_count(self, cr, uid, ids, field_name, arg, context=None):
return dict((tag_id, self.pool['forum.post'].search_count(cr, uid, [('tag_ids', 'in', tag_id)], context=context)) for tag_id in ids)
def _get_tag_from_post(self, cr, uid, ids, context=None):
return list(set(
[tag.id for post in self.pool['forum.post'].browse(cr, SUPERUSER_ID, ids, context=context) for tag in post.tag_ids]
))
_columns = {
'name': fields.char('Name', required=True),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'post_ids': fields.many2many('forum.post', 'forum_tag_rel', 'tag_id', 'post_id', 'Posts'),
'posts_count': fields.function(
_get_posts_count, type='integer', string="Number of Posts",
store={
'forum.post': (_get_tag_from_post, ['tag_ids'], 10),
}
),
'create_uid': fields.many2one('res.users', 'Created by', readonly=True),
}
| Jgarcia-IAS/SITE | addons/website_forum/models/forum.py | Python | agpl-3.0 | 36,003 |
from __future__ import absolute_import
from django.core.management import call_command
from teams.models import Team, TeamMember, Workflow
from widget.rpc import Rpc
def refresh_obj(m):
return m.__class__._default_manager.get(pk=m.pk)
def reset_solr():
# cause the default site to load
from haystack import backend
sb = backend.SearchBackend()
sb.clear()
call_command('update_index')
rpc = Rpc()
| ofer43211/unisubs | apps/teams/tests/teamstestsutils.py | Python | agpl-3.0 | 425 |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 09:47:41 2015
@author: thomas.douenne
"""
from __future__ import division
import statsmodels.formula.api as smf
from openfisca_france_indirect_taxation.examples.utils_example import simulate_df_calee_by_grosposte
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
simulated_variables = [
'pondmen',
'revtot',
'rev_disp_loyerimput',
'depenses_carburants',
'depenses_essence',
'depenses_diesel',
'strate',
'nenfants',
'nadultes',
'situacj',
'situapr',
'niveau_vie_decile'
]
for year in [2005]:
data_for_reg = simulate_df_calee_by_grosposte(simulated_variables = simulated_variables, year = year)
# In 2005 3 people consume fuel while their rev_disp_loyerimput is 0. Creates inf number in part_carburants
data_for_reg = data_for_reg[data_for_reg['rev_disp_loyerimput'] > 0]
data_for_reg['rev_disp_loyerimput_2'] = data_for_reg['rev_disp_loyerimput'] ** 2
data_for_reg['part_carburants'] = data_for_reg['depenses_carburants'] / data_for_reg['rev_disp_loyerimput']
data_for_reg['part_diesel'] = data_for_reg['depenses_diesel'] / data_for_reg['rev_disp_loyerimput']
data_for_reg['part_essence'] = data_for_reg['depenses_essence'] / data_for_reg['rev_disp_loyerimput']
data_for_reg['rural'] = 0
data_for_reg['petite_villes'] = 0
data_for_reg['villes_moyennes'] = 0
data_for_reg['grandes_villes'] = 0
data_for_reg['agglo_paris'] = 0
data_for_reg.loc[data_for_reg['strate'] == 0, 'rural'] = 1
data_for_reg.loc[data_for_reg['strate'] == 1, 'petite_villes'] = 1
data_for_reg.loc[data_for_reg['strate'] == 2, 'villes_moyennes'] = 1
data_for_reg.loc[data_for_reg['strate'] == 3, 'grandes_villes'] = 1
data_for_reg.loc[data_for_reg['strate'] == 4, 'agglo_paris'] = 1
deciles = ['decile_1', 'decile_2', 'decile_3', 'decile_4', 'decile_5', 'decile_6', 'decile_7', 'decile_8',
'decile_9', 'decile_10']
for decile in deciles:
data_for_reg[decile] = 0
number = decile.replace('decile_', '')
data_for_reg.loc[data_for_reg['niveau_vie_decile'] == int(number), decile] = 1
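        # Worked example of the dummy coding above (illustrative): a household in
        # niveau_vie_decile == 3 gets decile_3 = 1 and every other decile_* = 0;
        # decile_10 is omitted from the formulas below as the reference category.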
        # Employment status:
        # Working: employed, internship, student
        # Others: unemployed, retired, homemakers, other
data_for_reg['cj_travaille'] = 0
data_for_reg['pr_travaille'] = 0
data_for_reg.loc[data_for_reg['situacj'] < 4, 'cj_travaille'] = 1
data_for_reg.loc[data_for_reg['situacj'] == 0, 'cj_travaille'] = 0
data_for_reg.loc[data_for_reg['situapr'] < 4, 'pr_travaille'] = 1
data_for_reg['travaille'] = data_for_reg['cj_travaille'] + data_for_reg['pr_travaille']
regression_carburants = smf.ols(formula = 'part_carburants ~ \
decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
rural + petite_villes + grandes_villes + agglo_paris + \
nenfants + nadultes + travaille',
data = data_for_reg).fit()
print regression_carburants.summary()
regression_diesel = smf.ols(formula = 'part_diesel ~ \
decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
rural + petite_villes + grandes_villes + agglo_paris + \
nenfants + nadultes + travaille',
data = data_for_reg).fit()
print regression_diesel.summary()
regression_essence = smf.ols(formula = 'part_essence ~ \
decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
rural + petite_villes + grandes_villes + agglo_paris + \
nenfants + nadultes + travaille',
data = data_for_reg).fit()
print regression_essence.summary()
# It is tempting to add a variable 'vehicule'. However, I think it is a case of bad control. It captures part
# of the effect we actually want to estimate.
| thomasdouenne/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/regress/regress_determinants_ticpe.py | Python | agpl-3.0 | 4,104 |
"""
Fixture to configure XQueue response.
"""
import json
import requests
from common.test.acceptance.fixtures import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
            raise XQueueResponseFixtureError(
                u"Could not configure XQueue stub for queue '{0}'. Status code: {1}".format(
                    self._pattern, response.status_code))
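# Hedged usage sketch (the queue pattern and grader payload here are made up):
#
#   fixture = XQueueResponseFixture(
#       'test-queue',
#       {'score': 1, 'msg': '<p>Correct!</p>', 'correct': True},
#   )
#   fixture.install()
#
# Any submission containing 'test-queue' would then receive that response dict.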
| edx-solutions/edx-platform | common/test/acceptance/fixtures/xqueue.py | Python | agpl-3.0 | 1,436 |
import copy
import json
import logging
import os
import sys
from lxml import etree
from lxml.etree import Element, ElementTree, XMLParser
from xblock.core import XML_NAMESPACES
from xblock.fields import Dict, Scope, ScopeIds
from xblock.runtime import KvsFieldData
import dogstats_wrapper as dog_stats_api
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT, XModuleDescriptor
log = logging.getLogger(__name__)
# assume all XML files are persisted as utf-8.
EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True,
encoding='utf-8')
def name_to_pathname(name):
"""
Convert a location name for use in a path: replace ':' with '/'.
This allows users of the xml format to organize content into directories
"""
return name.replace(':', '/')
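# Illustrative behaviour of the helper above (not part of the original module):
#   name_to_pathname('course:sub:unit')  ->  'course/sub/unit'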
def is_pointer_tag(xml_obj):
"""
Check if xml_obj is a pointer tag: <blah url_name="something" />.
No children, one attribute named url_name, no text.
Special case for course roots: the pointer is
<course url_name="something" org="myorg" course="course">
xml_obj: an etree Element
Returns a bool.
"""
if xml_obj.tag != "course":
expected_attr = set(['url_name'])
else:
expected_attr = set(['url_name', 'course', 'org'])
actual_attr = set(xml_obj.attrib.keys())
has_text = xml_obj.text is not None and len(xml_obj.text.strip()) > 0
return len(xml_obj) == 0 and actual_attr == expected_attr and not has_text
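# Hedged sketch of is_pointer_tag (tag names and attributes are illustrative):
#   is_pointer_tag(etree.fromstring('<chapter url_name="intro"/>'))              # True
#   is_pointer_tag(etree.fromstring('<chapter url_name="intro">x</chapter>'))    # False: has text
#   is_pointer_tag(etree.fromstring('<course url_name="a" org="o" course="c"/>'))  # True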
def serialize_field(value):
"""
Return a string version of the value (where value is the JSON-formatted, internally stored value).
If the value is a string, then we simply return what was passed in.
Otherwise, we return json.dumps on the input value.
"""
if isinstance(value, basestring):
return value
return json.dumps(value, cls=EdxJSONEncoder)
def deserialize_field(field, value):
"""
Deserialize the string version to the value stored internally.
Note that this is not the same as the value returned by from_json, as model types typically store
their value internally as JSON. By default, this method will return the result of calling json.loads
on the supplied value, unless json.loads throws a TypeError, or the type of the value returned by json.loads
is not supported for this class (from_json throws an Error). In either of those cases, this method returns
the input value.
"""
try:
deserialized = json.loads(value)
if deserialized is None:
return deserialized
try:
field.from_json(deserialized)
return deserialized
except (ValueError, TypeError):
# Support older serialized version, which was just a string, not result of json.dumps.
# If the deserialized version cannot be converted to the type (via from_json),
# just return the original value. For example, if a string value of '3.4' was
# stored for a String field (before we started storing the result of json.dumps),
# then it would be deserialized as 3.4, but 3.4 is not supported for a String
# field. Therefore field.from_json(3.4) will throw an Error, and we should
# actually return the original value of '3.4'.
return value
except (ValueError, TypeError):
# Support older serialized version.
return value
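# Round-trip sketch for the two helpers above (the field objects are
# hypothetical stand-ins for XBlock List/String fields):
#   serialize_field([1, 2])   -> '[1, 2]'   (json.dumps for non-strings)
#   serialize_field('plain')  -> 'plain'    (strings pass through unchanged)
#   deserialize_field(list_field, '[1, 2]') -> [1, 2]
#   deserialize_field(string_field, '3.4')  -> '3.4'  (3.4 is not valid for a
#       String field, so the original serialized value is returned)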
class XmlParserMixin(object):
"""
Class containing XML parsing functionality shared between XBlock and XModuleDescriptor.
"""
# Extension to append to filename paths
filename_extension = 'xml'
xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export",
default={}, scope=Scope.settings)
# VS[compat]. Backwards compatibility code that can go away after
# importing 2012 courses.
# A set of metadata key conversions that we want to make
metadata_translations = {
'slug': 'url_name',
'name': 'display_name',
}
@classmethod
def _translate(cls, key):
"""
VS[compat]
"""
return cls.metadata_translations.get(key, key)
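    # Illustrative mapping (not part of the original code):
    #   cls._translate('slug')  -> 'url_name'
    #   cls._translate('name')  -> 'display_name'
    #   cls._translate('other') -> 'other'   (unknown keys pass through)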
# The attributes will be removed from the definition xml passed
# to definition_from_xml, and from the xml returned by definition_to_xml
# Note -- url_name isn't in this list because it's handled specially on
# import and export.
metadata_to_strip = ('data_dir',
'tabs', 'grading_policy',
'discussion_blackouts',
# VS[compat] -- remove the below attrs once everything is in the CMS
'course', 'org', 'url_name', 'filename',
# Used for storing xml attributes between import and export, for roundtrips
'xml_attributes')
metadata_to_export_to_policy = ('discussion_topics',)
@staticmethod
def _get_metadata_from_xml(xml_object, remove=True):
"""
Extract the metadata from the XML.
"""
meta = xml_object.find('meta')
if meta is None:
return ''
dmdata = meta.text
if remove:
xml_object.remove(meta)
return dmdata
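    # Sketch of the behaviour above (hypothetical XML): for an xml_object
    # parsed from '<problem><meta>{"due": "2012"}</meta></problem>' this
    # returns the raw text '{"due": "2012"}' and, with remove=True, also
    # deletes the <meta> child from xml_object.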
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Return the definition to be passed to the newly created descriptor
during from_xml
xml_object: An etree Element
"""
raise NotImplementedError("%s does not implement definition_from_xml" % cls.__name__)
@classmethod
def clean_metadata_from_xml(cls, xml_object):
"""
Remove any attribute named for a field with scope Scope.settings from the supplied
xml_object
"""
for field_name, field in cls.fields.items():
if field.scope == Scope.settings and xml_object.get(field_name) is not None:
del xml_object.attrib[field_name]
@classmethod
def file_to_xml(cls, file_object):
"""
Used when this module wants to parse a file object to xml
that will be converted to the definition.
Returns an lxml Element
"""
return etree.parse(file_object, parser=EDX_XML_PARSER).getroot()
@classmethod
def load_file(cls, filepath, fs, def_id): # pylint: disable=invalid-name
"""
Open the specified file in fs, and call cls.file_to_xml on it,
returning the lxml object.
Add details and reraise on error.
"""
try:
with fs.open(filepath) as xml_file:
return cls.file_to_xml(xml_file)
except Exception as err:
# Add info about where we are, but keep the traceback
msg = 'Unable to load file contents at path %s for item %s: %s ' % (
filepath, def_id, err)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def load_definition(cls, xml_object, system, def_id, id_generator):
"""
Load a descriptor definition from the specified xml_object.
Subclasses should not need to override this except in special
cases (e.g. html module)
Args:
xml_object: an lxml.etree._Element containing the definition to load
system: the modulestore system (aka, runtime) which accesses data and provides access to services
def_id: the definition id for the block--used to compute the usage id and asides ids
id_generator: used to generate the usage_id
"""
# VS[compat] -- the filename attr should go away once everything is
# converted. (note: make sure html files still work once this goes away)
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
filepath = ''
aside_children = []
else:
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_filename"]
)
filepath = cls._format_filepath(xml_object.tag, filename)
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_load_definition_backcompat"]
)
candidates = cls.backcompat_paths(filepath)
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
usage_id = id_generator.create_usage(def_id)
aside_children = system.parse_asides(definition_xml, def_id, usage_id, id_generator)
# Add the attributes from the pointer node
definition_xml.attrib.update(xml_object.attrib)
definition_metadata = cls._get_metadata_from_xml(definition_xml)
cls.clean_metadata_from_xml(definition_xml)
definition, children = cls.definition_from_xml(definition_xml, system)
if definition_metadata:
definition['definition_metadata'] = definition_metadata
definition['filename'] = [filepath, filename]
if aside_children:
definition['aside_children'] = aside_children
return definition, children
@classmethod
def load_metadata(cls, xml_object):
"""
Read the metadata attributes from this xml_object.
Returns a dictionary {key: value}.
"""
metadata = {'xml_attributes': {}}
for attr, val in xml_object.attrib.iteritems():
# VS[compat]. Remove after all key translations done
attr = cls._translate(attr)
if attr in cls.metadata_to_strip:
if attr in ('course', 'org', 'url_name', 'filename'):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:xmlparser_util_mixin_load_metadata",
"metadata:{}".format(attr),
)
)
# don't load these
continue
if attr not in cls.fields:
metadata['xml_attributes'][attr] = val
else:
metadata[attr] = deserialize_field(cls.fields[attr], val)
return metadata
@classmethod
def apply_policy(cls, metadata, policy):
"""
Add the keys in policy to metadata, after processing them
through the attrmap. Updates the metadata dict in place.
"""
for attr, value in policy.iteritems():
attr = cls._translate(attr)
if attr not in cls.fields:
# Store unknown attributes coming from policy.json
# in such a way that they will export to xml unchanged
metadata['xml_attributes'][attr] = value
else:
metadata[attr] = value
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator): # pylint: disable=unused-argument
"""
Use `node` to construct a new block.
Arguments:
node (etree.Element): The xml node to parse into an xblock.
runtime (:class:`.Runtime`): The runtime to use while parsing.
keys (:class:`.ScopeIds`): The keys identifying where this block
will store its data.
id_generator (:class:`.IdGenerator`): An object that will allow the
runtime to generate correct definition and usage ids for
children of this block.
Returns (XBlock): The newly parsed XBlock
"""
# VS[compat] -- just have the url_name lookup, once translation is done
url_name = cls._get_url_name(node)
def_id = id_generator.create_definition(node.tag, url_name)
usage_id = id_generator.create_usage(def_id)
aside_children = []
# VS[compat] -- detect new-style each-in-a-file mode
if is_pointer_tag(node):
# new style:
# read the actual definition file--named using url_name.replace(':','/')
definition_xml, filepath = cls.load_definition_xml(node, runtime, def_id)
aside_children = runtime.parse_asides(definition_xml, def_id, usage_id, id_generator)
else:
filepath = None
definition_xml = node
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:xmlparser_util_mixin_parse_xml"]
)
# Note: removes metadata.
definition, children = cls.load_definition(definition_xml, runtime, def_id, id_generator)
# VS[compat] -- make Ike's github preview links work in both old and
# new file layouts
if is_pointer_tag(node):
# new style -- contents actually at filepath
definition['filename'] = [filepath, filepath]
metadata = cls.load_metadata(definition_xml)
# move definition metadata into dict
dmdata = definition.get('definition_metadata', '')
if dmdata:
metadata['definition_metadata_raw'] = dmdata
try:
metadata.update(json.loads(dmdata))
except Exception as err:
log.debug('Error in loading metadata %r', dmdata, exc_info=True)
metadata['definition_metadata_err'] = str(err)
definition_aside_children = definition.pop('aside_children', None)
if definition_aside_children:
aside_children.extend(definition_aside_children)
# Set/override any metadata specified by policy
cls.apply_policy(metadata, runtime.get_policy(usage_id))
field_data = {}
field_data.update(metadata)
field_data.update(definition)
field_data['children'] = children
field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
xblock = runtime.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
ScopeIds(None, node.tag, def_id, usage_id),
field_data,
)
if aside_children:
asides_tags = [x.tag for x in aside_children]
asides = runtime.get_asides(xblock)
for asd in asides:
if asd.scope_ids.block_type in asides_tags:
xblock.add_aside(asd)
return xblock
@classmethod
def _get_url_name(cls, node):
"""
Reads url_name attribute from the node
"""
return node.get('url_name', node.get('slug'))
@classmethod
def load_definition_xml(cls, node, runtime, def_id):
"""
Loads definition_xml stored in a dedicated file
"""
url_name = cls._get_url_name(node)
filepath = cls._format_filepath(node.tag, name_to_pathname(url_name))
definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id)
return definition_xml, filepath
@classmethod
def _format_filepath(cls, category, name):
return u'{category}/{name}.{ext}'.format(category=category,
name=name,
ext=cls.filename_extension)
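    # Illustration (hypothetical values): with the default
    # filename_extension of 'xml',
    #
    #   >>> cls._format_filepath('problem', 'foo/bar')
    #   u'problem/foo/bar.xml'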
def export_to_file(self):
"""If this returns True, write the definition of this descriptor to a separate
file.
NOTE: Do not override this without a good reason. It is here
specifically for customtag...
"""
return True
def add_xml_to_node(self, node):
"""
For exporting, set data on `node` from ourselves.
"""
# Get the definition
xml_object = self.definition_to_xml(self.runtime.export_fs)
for aside in self.runtime.get_asides(self):
if aside.needs_serialization():
aside_node = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
aside.add_xml_to_node(aside_node)
xml_object.append(aside_node)
self.clean_metadata_from_xml(xml_object)
# Set the tag on both nodes so we get the file path right.
xml_object.tag = self.category
node.tag = self.category
# Add the non-inherited metadata
for attr in sorted(own_metadata(self)):
# don't want e.g. data_dir
if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:
val = serialize_field(self._field_data.get(self, attr))
try:
xml_object.set(attr, val)
except Exception:
logging.exception(
u'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!',
attr, val, self.url_name
)
for key, value in self.xml_attributes.items():
if key not in self.metadata_to_strip:
xml_object.set(key, serialize_field(value))
if self.export_to_file():
# Write the definition to a file
url_path = name_to_pathname(self.url_name)
filepath = self._format_filepath(self.category, url_path)
self.runtime.export_fs.makedirs(os.path.dirname(filepath), recreate=True)
with self.runtime.export_fs.open(filepath, 'wb') as fileobj:
ElementTree(xml_object).write(fileobj, pretty_print=True, encoding='utf-8')
else:
# Write all attributes from xml_object onto node
node.clear()
node.tag = xml_object.tag
node.text = xml_object.text
node.tail = xml_object.tail
node.attrib.update(xml_object.attrib)
node.extend(xml_object)
node.set('url_name', self.url_name)
# Special case for course pointers:
if self.category == 'course':
# add org and course attributes on the pointer tag
node.set('org', self.location.org)
node.set('course', self.location.course)
def definition_to_xml(self, resource_fs):
"""
Return a new etree Element object created from this modules definition.
"""
raise NotImplementedError(
"%s does not implement definition_to_xml" % self.__class__.__name__)
@property
def non_editable_metadata_fields(self):
"""
Return a list of all metadata fields that cannot be edited.
"""
non_editable_fields = super(XmlParserMixin, self).non_editable_metadata_fields
non_editable_fields.append(XmlParserMixin.xml_attributes)
return non_editable_fields
class XmlDescriptor(XmlParserMixin, XModuleDescriptor): # pylint: disable=abstract-method
"""
Mixin class for standardized parsing of XModule xml.
"""
resources_dir = None
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses.
Args:
xml_data (str): A string of xml that will be translated into data and children
for this module
system (:class:`.XMLParsingSystem):
id_generator (:class:`xblock.runtime.IdGenerator`): Used to generate the
usage_ids and definition_ids when loading this xml
"""
# Shim from from_xml to the parse_xml defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define from_xml themselves
# b) call super(..).from_xml(..)
return super(XmlDescriptor, cls).parse_xml(
etree.fromstring(xml_data),
system,
None, # This is ignored by XmlParserMixin
id_generator,
)
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Interpret the parsed XML in `node`, creating an XModuleDescriptor.
"""
if cls.from_xml != XmlDescriptor.from_xml:
# Skip the parse_xml from XmlParserMixin to get the shim parse_xml
# from XModuleDescriptor, which actually calls `from_xml`.
return super(XmlParserMixin, cls).parse_xml(node, runtime, keys, id_generator) # pylint: disable=bad-super-call
else:
return super(XmlDescriptor, cls).parse_xml(node, runtime, keys, id_generator)
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs.
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
"""
# Shim from export_to_xml to the add_xml_to_node defined in XmlParserMixin.
# This only exists to satisfy subclasses that both:
# a) define export_to_xml themselves
# b) call super(..).export_to_xml(..)
node = Element(self.category)
super(XmlDescriptor, self).add_xml_to_node(node)
return etree.tostring(node)
def add_xml_to_node(self, node):
"""
Export this :class:`XModuleDescriptor` as XML, by setting attributes on the provided
`node`.
"""
if self.export_to_xml != XmlDescriptor.export_to_xml:
# Skip the add_xml_to_node from XmlParserMixin to get the shim add_xml_to_node
# from XModuleDescriptor, which actually calls `export_to_xml`.
super(XmlParserMixin, self).add_xml_to_node(node) # pylint: disable=bad-super-call
else:
super(XmlDescriptor, self).add_xml_to_node(node)
| hastexo/edx-platform | common/lib/xmodule/xmodule/xml_module.py | Python | agpl-3.0 | 23,039 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Original Author: Mark Olesen <[email protected]>
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" output, and must comply with the terms set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - The openfoam-org package is a modified version of the openfoam package.
# If changes are needed here, consider if they should also be applied there.
#
# - mpi handling: WM_MPLIB=SYSTEMMPI and populate prefs.{csh,sh} with values
# from spack.
#
# - Building with boost/cgal is not included, since some of the logic is not
# entirely clear and thus untested.
# - Resolution of flex, zlib needs more attention (within OpenFOAM)
#
# Known issues
# - Combining +zoltan with +int64 has not been tested, but probably won't work.
# - Combining +mgridgen with +int64 or +float32 probably won't work.
#
##############################################################################
import glob
import os
import re
import llnl.util.tty as tty
from spack import *
from spack.pkg.builtin.openfoam import (
OpenfoamArch,
add_extra_files,
mplib_content,
rewrite_environ_files,
write_environ,
)
from spack.util.environment import EnvironmentModifications
class OpenfoamOrg(Package):
"""OpenFOAM is a GPL-opensource C++ CFD-toolbox.
The openfoam.org release is managed by the OpenFOAM Foundation Ltd as
a licensee of the OPENFOAM trademark.
This offering is not approved or endorsed by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
"""
homepage = "https://www.openfoam.org/"
baseurl = "https://github.com/OpenFOAM"
url = "https://github.com/OpenFOAM/OpenFOAM-4.x/archive/version-4.1.tar.gz"
git = "https://github.com/OpenFOAM/OpenFOAM-dev.git"
version('develop', branch='master')
version('8', sha256='94ba11cbaaa12fbb5b356e01758df403ac8832d69da309a5d79f76f42eb008fc',
url=baseurl + '/OpenFOAM-8/archive/version-8.tar.gz')
version('7', sha256='12389cf092dc032372617785822a597aee434a50a62db2a520ab35ba5a7548b5',
url=baseurl + '/OpenFOAM-7/archive/version-7.tar.gz')
version('6', sha256='32a6af4120e691ca2df29c5b9bd7bc7a3e11208947f9bccf6087cfff5492f025',
url=baseurl + '/OpenFOAM-6/archive/version-6.tar.gz')
version('5.0', sha256='9057d6a8bb9fa18802881feba215215699065e0b3c5cdd0c0e84cb29c9916c89',
url=baseurl + '/OpenFOAM-5.x/archive/version-5.0.tar.gz')
version('4.1', sha256='2de18de64e7abdb1b649ad8e9d2d58b77a2b188fb5bcb6f7c2a038282081fd31',
url=baseurl + '/OpenFOAM-4.x/archive/version-4.1.tar.gz')
version('2.4.0', sha256='9529aa7441b64210c400c019dcb2e0410fcfd62a6f62d23b6c5994c4753c4465',
url=baseurl + '/OpenFOAM-2.4.x/archive/version-2.4.0.tar.gz')
version('2.3.1', sha256='2bbcf4d5932397c2087a9b6d7eeee6d2b1350c8ea4f455415f05e7cd94d9e5ba',
url='http://downloads.sourceforge.net/foam/OpenFOAM-2.3.1.tgz')
variant('int64', default=False,
description='Compile with 64-bit label')
variant('float32', default=False,
description='Compile with 32-bit scalar (single-precision)')
variant('source', default=True,
description='Install library/application sources and tutorials')
variant('metis', default=False,
description='With metis decomposition')
depends_on('mpi')
depends_on('zlib')
depends_on('flex')
depends_on('cmake', type='build')
# Require scotch with ptscotch - corresponds to standard OpenFOAM setup
depends_on('scotch~metis+mpi~int64', when='~int64')
depends_on('scotch~metis+mpi+int64', when='+int64')
depends_on('metis@5:', when='+metis')
depends_on('metis+int64', when='+metis+int64')
# General patches - foamEtcFile as per openfoam.com (robuster)
common = ['spack-Allwmake', 'README-spack']
assets = ['bin/foamEtcFile']
# Version-specific patches
patch('https://github.com/OpenFOAM/OpenFOAM-7/commit/ef33cf38ac9b811072a8970c71fbda35a90f6641.patch',
sha256='73103e6b1bdbf3b1e0d517cbbd11562e98c6e9464df5f43e5125e9a5b457d1c5', when='@7')
patch('50-etc.patch', when='@5.0:5.9')
patch('41-etc.patch', when='@4.1')
patch('41-site.patch', when='@4.1:')
patch('240-etc.patch', when='@:2.4.0')
patch('isnan.patch', when='@:2.4.0')
# Add support for SYSTEMMPI
patch('https://github.com/OpenFOAM/OpenFOAM-2.3.x/commit/ae9a670c99472787f3a5446ac2b522bf3519b796.patch',
sha256='6c4c535baca3ce64035d512265c4ce8effd39de7602c923c5e19985db68b632a', when='@:2.3.1')
# The openfoam architecture, compiler information etc
_foam_arch = None
# Content for etc/prefs.{csh,sh}
etc_prefs = {}
# Content for etc/config.{csh,sh}/ files
etc_config = {}
phases = ['configure', 'build', 'install']
build_script = './spack-Allwmake' # <- Added by patch() method.
#
# - End of definitions / setup -
#
# Some user config settings
@property
def config(self):
settings = {
# Use SYSTEMMPI since openfoam-org doesn't have USERMPI
'mplib': 'SYSTEMMPI',
# Add links into bin/, lib/ (eg, for other applications)
'link': False,
}
# OpenFOAM v2.4 and earlier lacks WM_LABEL_OPTION
if self.spec.satisfies('@:2.4'):
settings['label-size'] = False
return settings
def setup_run_environment(self, env):
bashrc = self.prefix.etc.bashrc
try:
env.extend(EnvironmentModifications.from_sourcing_file(
bashrc, clean=True
))
except Exception as e:
msg = 'unexpected error when sourcing OpenFOAM bashrc [{0}]'
tty.warn(msg.format(str(e)))
def setup_dependent_build_environment(self, env, dependent_spec):
"""Location of the OpenFOAM project directory.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
env.set('FOAM_PROJECT_DIR', self.projectdir)
def setup_dependent_run_environment(self, env, dependent_spec):
"""Location of the OpenFOAM project directory.
This is identical to the WM_PROJECT_DIR value, but we avoid that
variable since it would mask the normal OpenFOAM cleanup of
previous versions.
"""
env.set('FOAM_PROJECT_DIR', self.projectdir)
@property
def projectdir(self):
"""Absolute location of project directory: WM_PROJECT_DIR/"""
return self.prefix # <- install directly under prefix
@property
def foam_arch(self):
if not self._foam_arch:
self._foam_arch = OpenfoamOrgArch(self.spec, **self.config)
return self._foam_arch
@property
def archbin(self):
"""Relative location of architecture-specific executables"""
return join_path('platforms', self.foam_arch, 'bin')
@property
def archlib(self):
"""Relative location of architecture-specific libraries"""
return join_path('platforms', self.foam_arch, 'lib')
def rename_source(self):
"""This is fairly horrible.
The github tarfiles have weird names that do not correspond to the
canonical name. We need to rename these, but leave a symlink for
spack to work with.
"""
# Note that this particular OpenFOAM requires absolute directories
# to build correctly!
parent = os.path.dirname(self.stage.source_path)
original = os.path.basename(self.stage.source_path)
target = 'OpenFOAM-{0}'.format(self.version)
# Could also grep through etc/bashrc for WM_PROJECT_VERSION
with working_dir(parent):
if original != target and not os.path.lexists(target):
os.rename(original, target)
os.symlink(target, original)
tty.info('renamed {0} -> {1}'.format(original, target))
def patch(self):
"""Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching."""
self.rename_source()
add_extra_files(self, self.common, self.assets)
# Avoid WM_PROJECT_INST_DIR for ThirdParty, site or jobControl.
# Use openfoam-site.patch to handle jobControl, site.
#
# Filtering: bashrc,cshrc (using a patch is less flexible)
edits = {
'WM_THIRD_PARTY_DIR':
r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
'WM_VERSION': str(self.version), # consistency
'FOAMY_HEX_MESH': '', # This is horrible (unset variable?)
}
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
def configure(self, spec, prefix):
"""Make adjustments to the OpenFOAM configuration files in their various
locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
        don't properly fit get placed in the etc/prefs.sh file (similarly for
csh).
"""
# Filtering bashrc, cshrc
edits = {}
edits.update(self.foam_arch.foam_dict())
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
# MPI content, with absolute paths
user_mpi = mplib_content(spec)
# Content for etc/prefs.{csh,sh}
self.etc_prefs = {
r'MPI_ROOT': spec['mpi'].prefix, # Absolute
r'MPI_ARCH_FLAGS': '"%s"' % user_mpi['FLAGS'],
r'MPI_ARCH_INC': '"%s"' % user_mpi['PINC'],
r'MPI_ARCH_LIBS': '"%s"' % user_mpi['PLIBS'],
}
# Content for etc/config.{csh,sh}/ files
self.etc_config = {
'CGAL': {},
'scotch': {},
'metis': {},
'paraview': [],
'gperftools': [], # Currently unused
}
if True:
self.etc_config['scotch'] = {
'SCOTCH_ARCH_PATH': spec['scotch'].prefix,
# For src/parallel/decompose/Allwmake
'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version),
}
if '+metis' in spec:
self.etc_config['metis'] = {
'METIS_ARCH_PATH': spec['metis'].prefix,
}
# Write prefs files according to the configuration.
# Only need prefs.sh for building, but install both for end-users
if self.etc_prefs:
write_environ(
self.etc_prefs,
posix=join_path('etc', 'prefs.sh'),
cshell=join_path('etc', 'prefs.csh'))
# Adjust components to use SPACK variants
for component, subdict in self.etc_config.items():
# Versions up to 3.0 used an etc/config/component.sh naming
# convention instead of etc/config.sh/component
if spec.satisfies('@:3.0'):
write_environ(
subdict,
posix=join_path('etc', 'config', component) + '.sh',
cshell=join_path('etc', 'config', component) + '.csh')
else:
write_environ(
subdict,
posix=join_path('etc', 'config.sh', component),
cshell=join_path('etc', 'config.csh', component))
def build(self, spec, prefix):
"""Build using the OpenFOAM Allwmake script, with a wrapper to source
its environment first.
Only build if the compiler is known to be supported.
"""
self.foam_arch.has_rule(self.stage.source_path)
self.foam_arch.create_rules(self.stage.source_path, self)
args = []
if self.parallel: # Build in parallel? - pass via the environment
os.environ['WM_NCOMPPROCS'] = str(make_jobs)
builder = Executable(self.build_script)
builder(*args)
def install(self, spec, prefix):
"""Install under the projectdir"""
mkdirp(self.projectdir)
projdir = os.path.basename(self.projectdir)
# Filtering: bashrc, cshrc
edits = {
'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
}
# All top-level files, except spack build info and possibly Allwmake
if '+source' in spec:
ignored = re.compile(r'^spack-.*')
else:
ignored = re.compile(r'^(Allwmake|spack-).*')
files = [
f for f in glob.glob("*")
if os.path.isfile(f) and not ignored.search(f)
]
for f in files:
install(f, self.projectdir)
# Having wmake and ~source is actually somewhat pointless...
# Install 'etc' before 'bin' (for symlinks)
# META-INFO for 1812 and later (or backported)
dirs = ['META-INFO', 'etc', 'bin', 'wmake']
if '+source' in spec:
dirs.extend(['applications', 'src', 'tutorials'])
for d in dirs:
if os.path.isdir(d):
install_tree(
d,
join_path(self.projectdir, d),
symlinks=True)
dirs = ['platforms']
if '+source' in spec:
dirs.extend(['doc'])
# Install platforms (and doc) skipping intermediate targets
relative_ignore_paths = ['src', 'applications', 'html', 'Guides']
ignore = lambda p: p in relative_ignore_paths
for d in dirs:
install_tree(
d,
join_path(self.projectdir, d),
ignore=ignore,
symlinks=True)
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path(etc_dir, 'bashrc'),
cshell=join_path(etc_dir, 'cshrc'))
self.install_links()
def install_links(self):
"""Add symlinks into bin/, lib/ (eg, for other applications)"""
# Make build log visible - it contains OpenFOAM-specific information
with working_dir(self.projectdir):
os.symlink(
join_path(os.path.relpath(self.install_log_path)),
join_path('log.' + str(self.foam_arch)))
if not self.config['link']:
return
# ln -s platforms/linux64GccXXX/lib lib
with working_dir(self.projectdir):
if os.path.isdir(self.archlib):
os.symlink(self.archlib, 'lib')
# (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .)
with working_dir(join_path(self.projectdir, 'bin')):
for f in [
f for f in glob.glob(join_path('..', self.archbin, "*"))
if os.path.isfile(f)
]:
os.symlink(f, os.path.basename(f))
# -----------------------------------------------------------------------------
class OpenfoamOrgArch(OpenfoamArch):
"""An openfoam-org variant of OpenfoamArch
"""
def update_arch(self, spec):
"""Handle differences in WM_ARCH naming
"""
OpenfoamArch.update_arch(self, spec)
# ARM64 (openfoam) -> Arm64 (openfoam-org)
self.arch = self.arch.replace("ARM64", "Arm64")
| LLNL/spack | var/spack/repos/builtin/packages/openfoam-org/package.py | Python | lgpl-2.1 | 16,158 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set et ts=4 sw=4:
#############################################################################
#
# mga-dialogs.py - Show mga msg dialog and about dialog.
#
# License: GPLv3
# Author: Angelo Naselli <[email protected]>
#############################################################################
###########
# imports #
###########
import sys
sys.path.insert(0,'../../../build/swig/python')
import os
import yui
log = yui.YUILog.instance()
log.setLogFileName("/tmp/debug.log")
log.enableDebugLogging( True )
appl = yui.YUI.application()
appl.setApplicationTitle("Show dialogs example")
#################
# class mainGui #
#################
class Info(object):
    def __init__(self, title, richtext, text):
        self.title = title
        self.richtext = richtext
        self.text = text
class mainGui():
"""
Main class
"""
def __init__(self):
self.factory = yui.YUI.widgetFactory()
self.dialog = self.factory.createPopupDialog()
mainvbox = self.factory.createVBox(self.dialog)
frame = self.factory.createFrame(mainvbox,"Test frame")
HBox = self.factory.createHBox(frame)
self.aboutbutton = self.factory.createPushButton(HBox,"&About")
self.closebutton = self.factory.createPushButton(self.factory.createRight(HBox), "&Close")
def ask_YesOrNo(self, info):
mgafactory = yui.YMGAWidgetFactory.getYMGAWidgetFactory(yui.YExternalWidgets.externalWidgetFactory("mga"))
dlg = mgafactory.createDialogBox(yui.YMGAMessageBox.B_TWO)
dlg.setTitle(info.title)
dlg.setText(info.text, info.richtext)
dlg.setButtonLabel("Yes", yui.YMGAMessageBox.B_ONE)
dlg.setButtonLabel("No", yui.YMGAMessageBox.B_TWO)
        dlg.setMinSize(50, 5)
return dlg.show() == yui.YMGAMessageBox.B_ONE
def aboutDialog(self):
mgafactory = yui.YMGAWidgetFactory.getYMGAWidgetFactory(yui.YExternalWidgets.externalWidgetFactory("mga"))
dlg = mgafactory.createAboutDialog("About dialog title example", "1.0.0", "GPLv3",
"Angelo Naselli", "This beautiful test example shows how it is easy to play with libyui bindings", "")
        dlg.show()
def handleevent(self):
"""
Event-handler for the 'widgets' demo
"""
while True:
event = self.dialog.waitForEvent()
if event.eventType() == yui.YEvent.CancelEvent:
self.dialog.destroy()
break
if event.widget() == self.closebutton:
info = Info("Quit confirmation", 1, "Are you sure you want to quit?")
if self.ask_YesOrNo(info):
self.dialog.destroy()
break
if event.widget() == self.aboutbutton:
self.aboutDialog()
if __name__ == "__main__":
main_gui = mainGui()
main_gui.handleevent()
| anaselli/libyui-bindings | swig/python/examples/mga-dialogs.py | Python | lgpl-3.0 | 3,012 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves static content for "static_dir" and "static_files" handlers."""
import base64
import errno
import httplib
import mimetypes
import os
import os.path
import re
import zlib
from google.appengine.api import appinfo
from google.appengine.tools import augment_mimetypes
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import url_handler
_FILE_MISSING_ERRNO_CONSTANTS = frozenset([errno.ENOENT, errno.ENOTDIR])
# Run at import time so we only do this once.
augment_mimetypes.init()
class StaticContentHandler(url_handler.UserConfiguredURLHandler):
"""Abstract base class for subclasses serving static content."""
# Associate the full path of a static file with a 2-tuple containing the:
# - mtime at which the file was last read from disk
# - a etag constructed from a hash of the file's contents
# Statting a small file to retrieve its mtime is approximately 20x faster than
# reading it to generate a hash of its contents.
_filename_to_mtime_and_etag = {}
def __init__(self, root_path, url_map, url_pattern):
"""Initializer for StaticContentHandler.
Args:
root_path: A string containing the full path of the directory containing
the application's app.yaml file.
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
url_pattern: A re.RegexObject that matches URLs that should be handled by
this handler. It may also optionally bind groups.
"""
super(StaticContentHandler, self).__init__(url_map, url_pattern)
self._root_path = root_path
def _get_mime_type(self, path):
"""Returns the mime type for the file at the given path."""
if self._url_map.mime_type is not None:
return self._url_map.mime_type
_, extension = os.path.splitext(path)
return mimetypes.types_map.get(extension, 'application/octet-stream')
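  # Sketch (assumes the url_map sets no explicit mime_type): known
  # extensions come from the stdlib table, unknown ones fall back to
  # 'application/octet-stream', e.g.
  #
  #   >>> handler._get_mime_type('styles/site.css')   # hypothetical handler
  #   'text/css'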
def _handle_io_exception(self, start_response, e):
"""Serves the response to an OSError or IOError.
Args:
start_response: A function with semantics defined in PEP-333. This
function will be called with a status appropriate to the given
exception.
e: An instance of OSError or IOError used to generate an HTTP status.
Returns:
      An empty iterable.
"""
if e.errno in _FILE_MISSING_ERRNO_CONSTANTS:
start_response('404 Not Found', [])
else:
start_response('403 Forbidden', [])
return []
@staticmethod
def _calculate_etag(data):
return base64.b64encode(str(zlib.crc32(data)))
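  # Worked example (computed for illustration, not from the source):
  # zlib.crc32('hello') is 907060870, so
  #
  #   >>> StaticContentHandler._calculate_etag('hello')
  #   'OTA3MDYwODcw'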
def _handle_path(self, full_path, environ, start_response):
"""Serves the response to a request for a particular file.
Note that production App Engine treats all methods as "GET" except "HEAD".
Unless set explicitly, the "Expires" and "Cache-Control" headers are
deliberately different from their production values to make testing easier.
If set explicitly then the values are preserved because the user may
reasonably want to test for them.
Args:
full_path: A string containing the absolute path to the file to serve.
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
data = None
if full_path in self._filename_to_mtime_and_etag:
last_mtime, etag = self._filename_to_mtime_and_etag[full_path]
else:
last_mtime = etag = None
user_headers = self._url_map.http_headers or appinfo.HttpHeadersDict()
if_match = environ.get('HTTP_IF_MATCH')
if_none_match = environ.get('HTTP_IF_NONE_MATCH')
try:
mtime = os.path.getmtime(full_path)
except (OSError, IOError) as e:
# RFC-2616 section 14.24 says:
# If none of the entity tags match, or if "*" is given and no current
# entity exists, the server MUST NOT perform the requested method, and
# MUST return a 412 (Precondition Failed) response.
if if_match:
start_response('412 Precondition Failed', [])
return []
elif self._url_map.require_matching_file:
return None
else:
return self._handle_io_exception(start_response, e)
if mtime != last_mtime:
try:
data = self._read_file(full_path)
except (OSError, IOError) as e:
return self._handle_io_exception(start_response, e)
etag = self._calculate_etag(data)
self._filename_to_mtime_and_etag[full_path] = mtime, etag
if if_match and not self._check_etag_match(if_match,
etag,
allow_weak_match=False):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
start_response('412 Precondition Failed',
[('ETag', '"%s"' % etag)])
return []
elif if_none_match and self._check_etag_match(if_none_match,
etag,
allow_weak_match=True):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
start_response('304 Not Modified',
[('ETag', '"%s"' % etag)])
return []
else:
if data is None:
try:
data = self._read_file(full_path)
except (OSError, IOError) as e:
return self._handle_io_exception(start_response, e)
etag = self._calculate_etag(data)
self._filename_to_mtime_and_etag[full_path] = mtime, etag
headers = [('Content-length', str(len(data)))]
if user_headers.Get('Content-type') is None:
headers.append(('Content-type', self._get_mime_type(full_path)))
if user_headers.Get('ETag') is None:
headers.append(('ETag', '"%s"' % etag))
if user_headers.Get('Expires') is None:
headers.append(('Expires', 'Fri, 01 Jan 1990 00:00:00 GMT'))
if user_headers.Get('Cache-Control') is None:
headers.append(('Cache-Control', 'no-cache'))
for name, value in user_headers.iteritems():
# "name" will always be unicode due to the way that ValidatedDict works.
headers.append((str(name), value))
start_response('200 OK', headers)
if environ['REQUEST_METHOD'] == 'HEAD':
return []
else:
return [data]
@staticmethod
def _read_file(full_path):
with open(full_path, 'rb') as f:
return f.read()
@staticmethod
def _check_etag_match(etag_headers, etag, allow_weak_match):
"""Checks if an etag header matches a given etag.
Args:
etag_headers: A string representing an e-tag header value e.g.
'"xyzzy", "r2d2xxxx", W/"c3piozzzz"' or '*'.
etag: The etag to match the header to. If None then only the '*' header
with match.
allow_weak_match: If True then weak etags are allowed to match.
Returns:
True if there is a match, False otherwise.
"""
# From RFC-2616:
# entity-tag = [ weak ] opaque-tag
# weak = "W/"
# opaque-tag = quoted-string
# quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
# qdtext = <any TEXT except <">>
# quoted-pair = "\" CHAR
# TEXT = <any OCTET except CTLs, but including LWS>
# CHAR = <any US-ASCII character (octets 0 - 127)>
# This parsing is not actually correct since it assumes that commas cannot
# appear in etags. But the generated etags do not contain commas so this
# still works.
for etag_header in etag_headers.split(','):
if etag_header.startswith('W/'):
if allow_weak_match:
etag_header = etag_header[2:]
else:
continue
etag_header = etag_header.strip().strip('"')
if etag_header == '*' or etag_header == etag:
return True
return False
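  # Behaviour sketch (hypothetical header values), given etag 'abc':
  #
  #   >>> StaticContentHandler._check_etag_match('"abc"', 'abc', False)
  #   True
  #   >>> StaticContentHandler._check_etag_match('W/"abc"', 'abc', True)
  #   True   # weak tag accepted when weak matches are allowed
  #   >>> StaticContentHandler._check_etag_match('W/"abc"', 'abc', False)
  #   False  # weak tag skipped otherwise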
@staticmethod
def _is_relative_path_valid(path):
"""Check if the relative path for a file is valid.
To match prod, redirection logic only fires on paths that contain a . or ..
as an entry, but ignores redundant separators. Since Dev App Server simply
passes the path to open, redundant separators are ignored (i.e. path/to/file
and path//to///file both map to the same thing). Since prod uses logic
that treats redundant separators as significant, we need to handle them
specially.
    A related problem is that if a redundant separator is placed as the file
    relative path, it can be passed to a StaticHandler as an absolute path.
    Because os.path.join throws away all previous components when it meets an
    absolute path, this could allow an attacker to read any file on the file
    system (i.e. if there is a static directory handler for /static and an
    attacker asks for the path '/static//etc/passwd', '/etc/passwd' is passed
    as the relative path and calling os.path.join(root_dir, '/etc/passwd')
    returns '/etc/passwd').
Args:
path: a path relative to a static handler base.
Returns:
bool indicating whether the path is valid or not.
"""
# Note: can't do something like path == os.path.normpath(path) as Windows
# would normalize separators to backslashes.
return not os.path.isabs(path) and '' not in path.split('/')
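  # Behaviour sketch (hypothetical paths):
  #
  #   >>> StaticContentHandler._is_relative_path_valid('css/site.css')
  #   True
  #   >>> StaticContentHandler._is_relative_path_valid('/etc/passwd')
  #   False  # absolute path
  #   >>> StaticContentHandler._is_relative_path_valid('css//site.css')
  #   False  # redundant separator yields an empty path entry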
@staticmethod
def _not_found_404(environ, start_response):
status = httplib.NOT_FOUND
start_response('%d %s' % (status, httplib.responses[status]),
[('Content-Type', 'text/plain')])
return ['%s not found' % environ['PATH_INFO']]
class StaticFilesHandler(StaticContentHandler):
"""Servers content for the "static_files" handler.
For example:
handlers:
- url: /(.*)/(.*)
static_files: \1/\2
upload: (.*)/(.*)
"""
def __init__(self, root_path, url_map):
"""Initializer for StaticFilesHandler.
Args:
root_path: A string containing the full path of the directory containing
the application's app.yaml file.
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in static_files handler: %s' % (url_map.url, e))
super(StaticFilesHandler, self).__init__(root_path,
url_map,
url_pattern)
def handle(self, match, environ, start_response):
"""Serves the file content matching the request.
Args:
match: The re.MatchObject containing the result of matching the URL
against this handler's URL pattern.
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
relative_path = match.expand(self._url_map.static_files)
if not self._is_relative_path_valid(relative_path):
if self._url_map.require_matching_file:
return None
else:
return self._not_found_404(environ, start_response)
full_path = os.path.join(self._root_path, relative_path)
return self._handle_path(full_path, environ, start_response)
class StaticDirHandler(StaticContentHandler):
"""Servers content for the "static_files" handler.
For example:
handlers:
- url: /css
static_dir: stylesheets
"""
def __init__(self, root_path, url_map):
"""Initializer for StaticDirHandler.
Args:
root_path: A string containing the full path of the directory containing
the application's app.yaml file.
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
url = url_map.url
# Take a url pattern like "/css" and transform it into a match pattern like
# "/css/(?P<file>.*)$"
if url[-1] != '/':
url += '/'
try:
url_pattern = re.compile('%s(?P<file>.*)$' % url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in static_dir handler: %s' % (url, e))
super(StaticDirHandler, self).__init__(root_path,
url_map,
url_pattern)
def handle(self, match, environ, start_response):
"""Serves the file content matching the request.
Args:
match: The re.MatchObject containing the result of matching the URL
against this handler's URL pattern.
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
relative_path = match.group('file')
if not self._is_relative_path_valid(relative_path):
return self._not_found_404(environ, start_response)
full_path = os.path.join(self._root_path,
self._url_map.static_dir,
relative_path)
return self._handle_path(full_path, environ, start_response)
| ProfessionalIT/professionalit-webiste | sdk/google_appengine/google/appengine/tools/devappserver2/static_files_handler.py | Python | lgpl-3.0 | 13,838 |
# Natural Language Toolkit: Sun Tsu-Bot
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Sam Huston 2007
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from util import *
"""
Tsu bot responds to all queries with a Sun Tsu saying
Quoted from Sun Tsu's The Art of War
Translated by LIONEL GILES, M.A. 1910
Hosted by the Gutenberg Project
http://www.gutenberg.org/
"""
pairs = (
(r'quit',
( "Good-bye.",
"Plan well",
"May victory be your future")),
(r'[^\?]*\?',
("Please consider whether you can answer your own question.",
"Ask me no questions!")),
(r'[0-9]+(.*)',
("It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
"There are five essentials for victory")),
(r'[A-Ca-c](.*)',
("The art of war is of vital importance to the State.",
"All warfare is based on deception.",
"If your opponent is secure at all points, be prepared for him. If he is in superior strength, evade him.",
"If the campaign is protracted, the resources of the State will not be equal to the strain.",
"Attack him where he is unprepared, appear where you are not expected.",
"There is no instance of a country having benefited from prolonged warfare.")),
(r'[D-Fd-f](.*)',
("The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.",
"Bring war material with you from home, but forage on the enemy.",
"In war, then, let your great object be victory, not lengthy campaigns.",
"To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.")),
(r'[G-Ig-i](.*)',
("Heaven signifies night and day, cold and heat, times and seasons.",
"It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
"The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.",
"One may know how to conquer without being able to do it.")),
(r'[J-Lj-l](.*)',
("There are three ways in which a ruler can bring misfortune upon his army.",
"By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.",
"By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.",
"By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.",
"There are five essentials for victory",
"He will win who knows when to fight and when not to fight.",
"He will win who knows how to handle both superior and inferior forces.",
"He will win whose army is animated by the same spirit throughout all its ranks.",
"He will win who, prepared himself, waits to take the enemy unprepared.",
"He will win who has military capacity and is not interfered with by the sovereign.")),
(r'[M-Om-o](.*)',
("If you know the enemy and know yourself, you need not fear the result of a hundred battles.",
"If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.",
"If you know neither the enemy nor yourself, you will succumb in every battle.",
"The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.")),
(r'[P-Rp-r](.*)',
("Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.",
"Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.",
"He wins his battles by making no mistakes. Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.",
"A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.",
"The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.")),
(r'[S-Us-u](.*)',
("What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.",
"Hence his victories bring him neither reputation for wisdom nor credit for courage.",
"Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.",
"In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.",
"There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.",
"Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.")),
(r'[V-Zv-z](.*)',
("It is a matter of life and death, a road either to safety or to ruin.",
"Hold out baits to entice the enemy. Feign disorder, and crush him.",
"All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.",
"Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.",
"So in war, the way is to avoid what is strong and to strike at what is weak.",
"Just as water retains no constant shape, so in warfare there are no constant conditions.")),
(r'(.*)',
( "Your statement insults me.",
""))
)
suntsu_chatbot = Chat(pairs, reflections)
def suntsu_chat():
print "Talk to the program by typing in plain English, using normal upper-"
print 'and lower-case letters and punctuation. Enter "quit" when done.'
print '='*72
print "You seek enlightenment?"
suntsu_chatbot.converse()
def demo():
suntsu_chat()
if __name__ == "__main__":
demo()
| tadgh/ArgoRevisit | third_party/nltk/chat/suntsu.py | Python | apache-2.0 | 6,223 |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import uuid
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.policy.backends import rules
from keystone import tests
from keystone.tests import test_v3
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class IdentityTestProtectedCase(test_v3.RestfulTestCase):
"""Test policy enforcement on the v3 Identity API."""
def setUp(self):
"""Setup for Identity Protection Test Cases.
As well as the usual housekeeping, create a set of domains,
users, roles and projects for the subsequent tests:
- Three domains: A,B & C. C is disabled.
- DomainA has user1, DomainB has user2 and user3
- DomainA has group1 and group2, DomainB has group3
- User1 has two roles on DomainA
- User2 has one role on DomainA
Remember that there will also be a fourth domain in existence,
the default domain.
"""
# Ensure that test_v3.RestfulTestCase doesn't load its own
# sample data, which would make checking the results of our
# tests harder
super(IdentityTestProtectedCase, self).setUp()
# Initialize the policy engine and allow us to write to a temp
# file in each test to create the policies
self.addCleanup(rules.reset)
rules.reset()
_unused, self.tmpfilename = tempfile.mkstemp()
self.config_fixture.config(policy_file=self.tmpfilename)
# A default auth request we can use - un-scoped user token
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'])
def load_sample_data(self):
# Start by creating a couple of domains
self.domainA = self.new_domain_ref()
self.assignment_api.create_domain(self.domainA['id'], self.domainA)
self.domainB = self.new_domain_ref()
self.assignment_api.create_domain(self.domainB['id'], self.domainB)
self.domainC = self.new_domain_ref()
self.domainC['enabled'] = False
self.assignment_api.create_domain(self.domainC['id'], self.domainC)
# Now create some users, one in domainA and two of them in domainB
self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
self.user1['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.user1['id'], self.user1)
self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
self.user2['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.user2['id'], self.user2)
self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
self.user3['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.user3['id'], self.user3)
self.group1 = self.new_group_ref(domain_id=self.domainA['id'])
self.identity_api.create_group(self.group1['id'], self.group1)
self.group2 = self.new_group_ref(domain_id=self.domainA['id'])
self.identity_api.create_group(self.group2['id'], self.group2)
self.group3 = self.new_group_ref(domain_id=self.domainB['id'])
self.identity_api.create_group(self.group3['id'], self.group3)
self.role = self.new_role_ref()
self.assignment_api.create_role(self.role['id'], self.role)
self.role1 = self.new_role_ref()
self.assignment_api.create_role(self.role1['id'], self.role1)
self.assignment_api.create_grant(self.role['id'],
user_id=self.user1['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.user2['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role1['id'],
user_id=self.user1['id'],
domain_id=self.domainA['id'])
def _get_id_list_from_ref_list(self, ref_list):
result_list = []
for x in ref_list:
result_list.append(x['id'])
return result_list
def _set_policy(self, new_policy):
with open(self.tmpfilename, "w") as policyfile:
policyfile.write(jsonutils.dumps(new_policy))
def test_list_users_unprotected(self):
"""GET /users (unprotected)
Test Plan:
- Update policy so api is unprotected
- Use an un-scoped token to make sure we can get back all
the users independent of domain
"""
self._set_policy({"identity:list_users": []})
r = self.get('/users', auth=self.auth)
id_list = self._get_id_list_from_ref_list(r.result.get('users'))
self.assertIn(self.user1['id'], id_list)
self.assertIn(self.user2['id'], id_list)
self.assertIn(self.user3['id'], id_list)
def test_list_users_filtered_by_domain(self):
"""GET /users?domain_id=mydomain (filtered)
Test Plan:
- Update policy so api is unprotected
- Use an un-scoped token to make sure we can filter the
users by domainB, getting back the 2 users in that domain
"""
self._set_policy({"identity:list_users": []})
url_by_name = '/users?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth)
# We should get back two users, those in DomainB
id_list = self._get_id_list_from_ref_list(r.result.get('users'))
self.assertIn(self.user2['id'], id_list)
self.assertIn(self.user3['id'], id_list)
def test_get_user_protected_match_id(self):
"""GET /users/{id} (match payload)
Test Plan:
- Update policy to protect api by user_id
- List users with user_id of user1 as filter, to check that
this will correctly match user_id in the flattened
payload
"""
# TODO(henry-nash, ayoung): It would be good to expand this
# test for further test flattening, e.g. protect on, say, an
# attribute of an object being created
new_policy = {"identity:get_user": [["user_id:%(user_id)s"]]}
self._set_policy(new_policy)
url_by_name = '/users/%s' % self.user1['id']
r = self.get(url_by_name, auth=self.auth)
self.assertEqual(self.user1['id'], r.result['user']['id'])
def test_get_user_protected_match_target(self):
"""GET /users/{id} (match target)
Test Plan:
- Update policy to protect api by domain_id
- Try and read a user who is in DomainB with a token scoped
to Domain A - this should fail
- Retry this for a user who is in Domain A, which should succeed.
- Finally, try getting a user that does not exist, which should
still return UserNotFound
"""
new_policy = {'identity:get_user':
[["domain_id:%(target.user.domain_id)s"]]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/users/%s' % self.user2['id']
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
url_by_name = '/users/%s' % self.user1['id']
r = self.get(url_by_name, auth=self.auth)
self.assertEqual(self.user1['id'], r.result['user']['id'])
url_by_name = '/users/%s' % uuid.uuid4().hex
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.UserNotFound.code)
def test_revoke_grant_protected_match_target(self):
"""DELETE /domains/{id}/users/{id}/roles/{id} (match target)
Test Plan:
- Update policy to protect api by domain_id of entities in
the grant
- Try and delete the existing grant that has a user who is
from a different domain - this should fail.
- Retry this for a user who is in Domain A, which should succeed.
"""
new_policy = {'identity:revoke_grant':
[["domain_id:%(target.user.domain_id)s"]]}
self._set_policy(new_policy)
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domainA['id'],
'user_id': self.user2['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role['id']}
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
self.delete(member_url, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domainA['id'],
'user_id': self.user1['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role1['id']}
self.delete(member_url, auth=self.auth)
def test_list_users_protected_by_domain(self):
"""GET /users?domain_id=mydomain (protected)
Test Plan:
- Update policy to protect api by domain_id
        - List users using a token scoped to domainA with a filter
          specifying domainA - we should only get back the one user
          that is in domainA.
- Try and read the users from domainB - this should fail since
we don't have a token scoped for domainB
"""
new_policy = {"identity:list_users": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/users?domain_id=%s' % self.domainA['id']
r = self.get(url_by_name, auth=self.auth)
# We should only get back one user, the one in DomainA
id_list = self._get_id_list_from_ref_list(r.result.get('users'))
self.assertEqual(1, len(id_list))
self.assertIn(self.user1['id'], id_list)
# Now try for domainB, which should fail
url_by_name = '/users?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
def test_list_groups_protected_by_domain(self):
"""GET /groups?domain_id=mydomain (protected)
Test Plan:
- Update policy to protect api by domain_id
- List groups using a token scoped to domainA and make sure
we only get back the two groups that are in domainA
- Try and read the groups from domainB - this should fail since
we don't have a token scoped for domainB
"""
new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/groups?domain_id=%s' % self.domainA['id']
r = self.get(url_by_name, auth=self.auth)
# We should only get back two groups, the ones in DomainA
id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
self.assertEqual(2, len(id_list))
self.assertIn(self.group1['id'], id_list)
self.assertIn(self.group2['id'], id_list)
# Now try for domainB, which should fail
url_by_name = '/groups?domain_id=%s' % self.domainB['id']
r = self.get(url_by_name, auth=self.auth,
expected_status=exception.ForbiddenAction.code)
def test_list_groups_protected_by_domain_and_filtered(self):
"""GET /groups?domain_id=mydomain&name=myname (protected)
Test Plan:
- Update policy to protect api by domain_id
- List groups using a token scoped to domainA with a filter
specifying both domainA and the name of group.
- We should only get back the group in domainA that matches
the name
"""
new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
self._set_policy(new_policy)
self.auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
domain_id=self.domainA['id'])
url_by_name = '/groups?domain_id=%s&name=%s' % (
self.domainA['id'], self.group2['name'])
r = self.get(url_by_name, auth=self.auth)
        # We should only get back one group, the one in DomainA that matches
        # the name supplied
id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
self.assertEqual(1, len(id_list))
self.assertIn(self.group2['id'], id_list)
class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
"""Test policy enforcement of the sample v3 cloud policy file."""
def setUp(self):
"""Setup for v3 Cloud Policy Sample Test Cases.
The following data is created:
- Three domains: domainA, domainB and admin_domain
        - One project, whose name is 'project'
- domainA has three users: domain_admin_user, project_admin_user and
just_a_user:
- domain_admin_user has role 'admin' on domainA,
- project_admin_user has role 'admin' on the project,
- just_a_user has a non-admin role on both domainA and the project.
- admin_domain has user cloud_admin_user, with an 'admin' role
on admin_domain.
We test various api protection rules from the cloud sample policy
file to make sure the sample is valid and that we correctly enforce it.
"""
# Ensure that test_v3.RestfulTestCase doesn't load its own
# sample data, which would make checking the results of our
# tests harder
super(IdentityTestv3CloudPolicySample, self).setUp()
# Finally, switch to the v3 sample policy file
self.addCleanup(rules.reset)
rules.reset()
self.config_fixture.config(
policy_file=tests.dirs.etc('policy.v3cloudsample.json'))
def load_sample_data(self):
# Start by creating a couple of domains
self.domainA = self.new_domain_ref()
self.assignment_api.create_domain(self.domainA['id'], self.domainA)
self.domainB = self.new_domain_ref()
self.assignment_api.create_domain(self.domainB['id'], self.domainB)
self.admin_domain = {'id': 'admin_domain_id', 'name': 'Admin_domain'}
self.assignment_api.create_domain(self.admin_domain['id'],
self.admin_domain)
# And our users
self.cloud_admin_user = self.new_user_ref(
domain_id=self.admin_domain['id'])
self.cloud_admin_user['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.cloud_admin_user['id'],
self.cloud_admin_user)
self.just_a_user = self.new_user_ref(domain_id=self.domainA['id'])
self.just_a_user['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.just_a_user['id'], self.just_a_user)
self.domain_admin_user = self.new_user_ref(
domain_id=self.domainA['id'])
self.domain_admin_user['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.domain_admin_user['id'],
self.domain_admin_user)
self.project_admin_user = self.new_user_ref(
domain_id=self.domainA['id'])
self.project_admin_user['password'] = uuid.uuid4().hex
self.identity_api.create_user(self.project_admin_user['id'],
self.project_admin_user)
# The admin role and another plain role
self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
self.assignment_api.create_role(self.admin_role['id'], self.admin_role)
self.role = self.new_role_ref()
self.assignment_api.create_role(self.role['id'], self.role)
# The cloud admin just gets the admin role
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.cloud_admin_user['id'],
domain_id=self.admin_domain['id'])
# Assign roles to the domain
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.domain_admin_user['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
domain_id=self.domainA['id'])
        # Create a project and assign roles to it
self.project = self.new_project_ref(domain_id=self.domainA['id'])
self.assignment_api.create_project(self.project['id'], self.project)
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.project_admin_user['id'],
project_id=self.project['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
project_id=self.project['id'])
def _stati(self, expected_status):
# Return the expected return codes for APIs with and without data
# with any specified status overriding the normal values
if expected_status is None:
return (200, 201, 204)
else:
return (expected_status, expected_status, expected_status)
def _test_user_management(self, domain_id, expected=None):
status_OK, status_created, status_no_data = self._stati(expected)
entity_url = '/users/%s' % self.just_a_user['id']
list_url = '/users?domain_id=%s' % domain_id
self.get(entity_url, auth=self.auth,
expected_status=status_OK)
self.get(list_url, auth=self.auth,
expected_status=status_OK)
user = {'description': 'Updated'}
self.patch(entity_url, auth=self.auth, body={'user': user},
expected_status=status_OK)
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
user_ref = self.new_user_ref(domain_id=domain_id)
self.post('/users', auth=self.auth, body={'user': user_ref},
expected_status=status_created)
def _test_project_management(self, domain_id, expected=None):
status_OK, status_created, status_no_data = self._stati(expected)
entity_url = '/projects/%s' % self.project['id']
list_url = '/projects?domain_id=%s' % domain_id
self.get(entity_url, auth=self.auth,
expected_status=status_OK)
self.get(list_url, auth=self.auth,
expected_status=status_OK)
project = {'description': 'Updated'}
self.patch(entity_url, auth=self.auth, body={'project': project},
expected_status=status_OK)
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
proj_ref = self.new_project_ref(domain_id=domain_id)
self.post('/projects', auth=self.auth, body={'project': proj_ref},
expected_status=status_created)
def _test_domain_management(self, expected=None):
status_OK, status_created, status_no_data = self._stati(expected)
entity_url = '/domains/%s' % self.domainB['id']
list_url = '/domains'
self.get(entity_url, auth=self.auth,
expected_status=status_OK)
self.get(list_url, auth=self.auth,
expected_status=status_OK)
domain = {'description': 'Updated', 'enabled': False}
self.patch(entity_url, auth=self.auth, body={'domain': domain},
expected_status=status_OK)
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
domain_ref = self.new_domain_ref()
self.post('/domains', auth=self.auth, body={'domain': domain_ref},
expected_status=status_created)
def _test_grants(self, target, entity_id, expected=None):
status_OK, status_created, status_no_data = self._stati(expected)
a_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_role(a_role['id'], a_role)
collection_url = (
'/%(target)s/%(target_id)s/users/%(user_id)s/roles' % {
'target': target,
'target_id': entity_id,
'user_id': self.just_a_user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': a_role['id']}
self.put(member_url, auth=self.auth,
expected_status=status_no_data)
self.head(member_url, auth=self.auth,
expected_status=status_no_data)
self.get(collection_url, auth=self.auth,
expected_status=status_OK)
self.delete(member_url, auth=self.auth,
expected_status=status_no_data)
def test_user_management(self):
# First, authenticate with a user that does not have the domain
# admin role - shouldn't be able to do much.
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'],
domain_id=self.domainA['id'])
self._test_user_management(
self.domainA['id'], expected=exception.ForbiddenAction.code)
# Now, authenticate with a user that does have the domain admin role
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
self._test_user_management(self.domainA['id'])
def test_user_management_by_cloud_admin(self):
# Test users management with a cloud admin. This user should
# be able to manage users in any domain.
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
domain_id=self.admin_domain['id'])
self._test_user_management(self.domainA['id'])
def test_project_management(self):
# First, authenticate with a user that does not have the project
# admin role - shouldn't be able to do much.
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'],
domain_id=self.domainA['id'])
self._test_project_management(
self.domainA['id'], expected=exception.ForbiddenAction.code)
# ...but should still be able to list projects of which they are
# a member
url = '/users/%s/projects' % self.just_a_user['id']
self.get(url, auth=self.auth)
# Now, authenticate with a user that does have the domain admin role
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
self._test_project_management(self.domainA['id'])
def test_domain_grants(self):
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'],
domain_id=self.domainA['id'])
self._test_grants('domains', self.domainA['id'],
expected=exception.ForbiddenAction.code)
# Now, authenticate with a user that does have the domain admin role
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
self._test_grants('domains', self.domainA['id'])
# Check that with such a token we cannot modify grants on a
# different domain
self._test_grants('domains', self.domainB['id'],
expected=exception.ForbiddenAction.code)
def test_domain_grants_by_cloud_admin(self):
# Test domain grants with a cloud admin. This user should be
# able to manage roles on any domain.
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
domain_id=self.admin_domain['id'])
self._test_grants('domains', self.domainA['id'])
def test_project_grants(self):
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
password=self.just_a_user['password'],
project_id=self.project['id'])
self._test_grants('projects', self.project['id'],
expected=exception.ForbiddenAction.code)
# Now, authenticate with a user that does have the project
# admin role
self.auth = self.build_authentication_request(
user_id=self.project_admin_user['id'],
password=self.project_admin_user['password'],
project_id=self.project['id'])
self._test_grants('projects', self.project['id'])
def test_project_grants_by_domain_admin(self):
# Test project grants with a domain admin. This user should be
# able to manage roles on any project in its own domain.
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
self._test_grants('projects', self.project['id'])
def test_cloud_admin(self):
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
self._test_domain_management(
expected=exception.ForbiddenAction.code)
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
domain_id=self.admin_domain['id'])
self._test_domain_management()
| Sazzadmasud/Keystone_hash_token | keystone/tests/test_v3_protection.py | Python | apache-2.0 | 27,778 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" metrichandler.py """
import traceback
import tornado.gen
import tornado.httpclient
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.proto import common_pb2
from heron.proto import tmaster_pb2
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python.handlers import BaseHandler
class MetricsHandler(BaseHandler):
"""
URL - /topologies/metrics
Parameters:
- cluster (required)
- role - (optional) Role used to submit the topology.
- environ (required)
- topology (required) name of the requested topology
- component (required)
- metricname (required, repeated)
- interval (optional)
- instance (optional, repeated)
The response JSON is a map of all the requested
(or if nothing is mentioned, all) components
of the topology, to the metrics that are reported
by that component.
"""
# pylint: disable=attribute-defined-outside-init
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
component = self.get_argument_component()
metric_names = self.get_required_arguments_metricnames()
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
cluster, role, environ, topology_name)
interval = int(self.get_argument(constants.PARAM_INTERVAL, default=-1))
instances = self.get_arguments(constants.PARAM_INSTANCE)
metrics = yield tornado.gen.Task(
self.getComponentMetrics,
topology.tmaster, component, metric_names, instances, interval)
self.write_success_response(metrics)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
# pylint: disable=too-many-locals, no-self-use, unused-argument
@tornado.gen.coroutine
def getComponentMetrics(self,
tmaster,
componentName,
metricNames,
instances,
interval,
callback=None):
"""
Get the specified metrics for the given component name of this topology.
Returns the following dict on success:
{
"metrics": {
<metricname>: {
<instance>: <numeric value>,
<instance>: <numeric value>,
...
}, ...
},
"interval": <numeric value>,
"component": "..."
}
Raises exception on failure.
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
raise Exception("No Tmaster found")
host = tmaster.host
port = tmaster.stats_port
metricRequest = tmaster_pb2.MetricRequest()
metricRequest.component_name = componentName
if len(instances) > 0:
for instance in instances:
metricRequest.instance_id.append(instance)
for metricName in metricNames:
metricRequest.metric.append(metricName)
metricRequest.interval = interval
# Serialize the metricRequest to send as a payload
# with the HTTP request.
metricRequestString = metricRequest.SerializeToString()
url = "http://{0}:{1}/stats".format(host, port)
request = tornado.httpclient.HTTPRequest(url,
body=metricRequestString,
method='POST',
request_timeout=5)
Log.debug("Making HTTP call to fetch metrics")
Log.debug("url: " + url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = "Error in getting metrics from Tmaster, code: " + responseCode
Log.error(message)
raise Exception(message)
# Parse the response from tmaster.
metricResponse = tmaster_pb2.MetricResponse()
metricResponse.ParseFromString(result.body)
if metricResponse.status.status == common_pb2.NOTOK:
if metricResponse.status.HasField("message"):
Log.warn("Received response from Tmaster: %s", metricResponse.status.message)
# Form the response.
ret = {}
ret["interval"] = metricResponse.interval
ret["component"] = componentName
ret["metrics"] = {}
for metric in metricResponse.metric:
instance = metric.instance_id
for im in metric.metric:
metricname = im.name
value = im.value
if metricname not in ret["metrics"]:
ret["metrics"][metricname] = {}
ret["metrics"][metricname][instance] = value
raise tornado.gen.Return(ret)
| mycFelix/heron | heron/tools/tracker/src/python/handlers/metricshandler.py | Python | apache-2.0 | 5,834 |
"""Support for Firmata switch output."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from .const import (
CONF_INITIAL_STATE,
CONF_NEGATE_STATE,
CONF_PIN,
CONF_PIN_MODE,
DOMAIN,
)
from .entity import FirmataPinEntity
from .pin import FirmataBinaryDigitalOutput, FirmataPinUsedException
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Firmata switches."""
new_entities = []
board = hass.data[DOMAIN][config_entry.entry_id]
for switch in board.switches:
pin = switch[CONF_PIN]
pin_mode = switch[CONF_PIN_MODE]
initial = switch[CONF_INITIAL_STATE]
negate = switch[CONF_NEGATE_STATE]
api = FirmataBinaryDigitalOutput(board, pin, pin_mode, initial, negate)
try:
api.setup()
except FirmataPinUsedException:
            _LOGGER.error(
                "Could not set up switch on pin %s since the pin is "
                "already in use.",
                switch[CONF_PIN],
            )
continue
name = switch[CONF_NAME]
switch_entity = FirmataSwitch(api, config_entry, name, pin)
new_entities.append(switch_entity)
if new_entities:
async_add_entities(new_entities)
class FirmataSwitch(FirmataPinEntity, SwitchEntity):
"""Representation of a switch on a Firmata board."""
async def async_added_to_hass(self) -> None:
"""Set up a switch."""
await self._api.start_pin()
self.async_write_ha_state()
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self._api.is_on
async def async_turn_on(self, **kwargs) -> None:
"""Turn on switch."""
_LOGGER.debug("Turning switch %s on", self._name)
await self._api.turn_on()
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off switch."""
_LOGGER.debug("Turning switch %s off", self._name)
await self._api.turn_off()
self.async_write_ha_state()
| tchellomello/home-assistant | homeassistant/components/firmata/switch.py | Python | apache-2.0 | 2,287 |
# Copyright 2014, Jeff Buttars, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.errors as acos_errors
import acos_client.v30.base as base
class HealthMonitor(base.BaseV30):
# Valid method objects
ICMP = 'icmp'
TCP = 'tcp'
HTTP = 'http'
HTTPS = 'https'
url_prefix = "/health/monitor/"
_method_objects = {
ICMP: {
"icmp": 1
},
HTTP: {
"http": 1,
"http-port": 80,
"http-expect": 1,
"http-response-code": "200",
"http-url": 1,
"url-type": "GET",
"url-path": "/",
},
HTTPS: {
"https": 1,
"web-port": 443,
"https-expect": 1,
"https-response-code": "200",
"https-url": 1,
"url-type": "GET",
"url-path": "/",
"disable-sslv2hello": 0
},
TCP: {
"method-tcp": 1,
"tcp-port": 80
},
}
def get(self, name, **kwargs):
return self._get(self.url_prefix + name, **kwargs)
def _set(self, action, name, mon_method, interval, timeout, max_retries,
method=None, url=None, expect_code=None, port=None, update=False,
**kwargs):
params = {
"monitor": {
"name": name,
"retry": int(max_retries),
"interval": int(interval),
"timeout": int(timeout),
"method": {
mon_method: self._method_objects[mon_method]
}
}
}
if method:
params['monitor']['method'][mon_method]['url-type'] = method
if url:
params['monitor']['method'][mon_method]['url-path'] = url
if expect_code:
k = "%s-response-code" % mon_method
params['monitor']['method'][mon_method][k] = str(expect_code)
if port:
if mon_method == self.HTTPS:
k = 'web-port'
else:
k = '%s-port' % mon_method
params['monitor']['method'][mon_method][k] = int(port)
if update:
action += name
self._post(action, params, **kwargs)
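    # Illustrative payload (a sketch; values hypothetical): a call such as
    #   _set(self.url_prefix, 'hm1', self.HTTP, 5, 5, 3,
    #        method='GET', url='/ping', expect_code='200', port=8080)
    # posts roughly:
    #   {"monitor": {"name": "hm1", "retry": 3, "interval": 5, "timeout": 5,
    #                "method": {"http": {"http": 1, "http-port": 8080,
    #                                    "http-expect": 1,
    #                                    "http-response-code": "200",
    #                                    "http-url": 1, "url-type": "GET",
    #                                    "url-path": "/ping"}}}}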
def create(self, name, mon_type, interval, timeout, max_retries,
method=None, url=None, expect_code=None, port=None, **kwargs):
try:
self.get(name)
except acos_errors.NotFound:
pass
else:
raise acos_errors.Exists()
self._set(self.url_prefix, name, mon_type, interval, timeout,
max_retries, method, url, expect_code, port, **kwargs)
def update(self, name, mon_type, interval, timeout, max_retries,
method=None, url=None, expect_code=None, port=None, **kwargs):
self.get(name) # We want a NotFound if it does not exist
self._set(self.url_prefix, name, mon_type, interval, timeout,
max_retries, method, url, expect_code, port, update=True,
**kwargs)
def delete(self, name):
self._delete(self.url_prefix + name)
| dougwig/acos-client | acos_client/v30/slb/hm.py | Python | apache-2.0 | 3,696 |
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All sorts of properties for every field.
Generated by ./generate-onetime-js-widget-data.py
AMD-style module definition.
Note: No leading, internal path before the [], since we'd have to hardwire it
to data/whatever-fns.js which is inflexible.
"""
# TODO: Clean up the description fields that actually contain data
# extracted from the comments of dbroot_v2.proto.
# The META_INFO contains already vetted snippets.
META_INFO = r"""
{
"end_snippet.bbs_server_info.base_url:value": {
"abstract_fieldpath": "end_snippet.bbs_server_info.base_url:value",
"default_value": null,
"description": "URL of the server including protocol, domain and port. Can be translated if we use different servers for different languages.",
"empty_concrete_fieldpath": "end_snippet.bbs_server_info.base_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "base_url:value",
"presence": "optional",
"short_label": "base_url",
"typ": "string"
},
"end_snippet.bbs_server_info.file_submit_path:value": {
"abstract_fieldpath": "end_snippet.bbs_server_info.file_submit_path:value",
"default_value": null,
"description": "Path on server where files can be submitted.",
"empty_concrete_fieldpath": "end_snippet.bbs_server_info.file_submit_path:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "file_submit_path:value",
"presence": "optional",
"short_label": "file_submit_path",
"typ": "string"
},
"end_snippet.bbs_server_info.name:value": {
"abstract_fieldpath": "end_snippet.bbs_server_info.name:value",
"default_value": null,
"description": "Name that will be displayed in context menu to user. Must be translated.",
"empty_concrete_fieldpath": "end_snippet.bbs_server_info.name:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "name:value",
"presence": "optional",
"short_label": "name",
"typ": "string"
},
"end_snippet.bbs_server_info.post_wizard_path:value": {
"abstract_fieldpath": "end_snippet.bbs_server_info.post_wizard_path:value",
"default_value": null,
"description": "Path on server where wizard can be found.",
"empty_concrete_fieldpath": "end_snippet.bbs_server_info.post_wizard_path:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "post_wizard_path:value",
"presence": "optional",
"short_label": "post_wizard_path",
"typ": "string"
},
"end_snippet.client_options.disable_disk_cache": {
"abstract_fieldpath": "end_snippet.client_options.disable_disk_cache",
"default_value": null,
"description": "If true, no data will be cached on disk for this database. It will not be accessible offline.",
"empty_concrete_fieldpath": "end_snippet.client_options.disable_disk_cache",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "disable_disk_cache",
"presence": "optional",
"short_label": "disable_disk_cache",
"typ": "bool"
},
"end_snippet.cobrand_info.logo_url": {
"abstract_fieldpath": "end_snippet.cobrand_info.logo_url",
"default_value": null,
"description": "URL of image to use as logo. Can be remote or local. However, using local URLs depends on the installation of the client and should be used carefully.",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].logo_url",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "logo_url",
"presence": "required",
"short_label": "logo_url",
"typ": "string"
},
"end_snippet.cobrand_info.screen_size": {
"abstract_fieldpath": "end_snippet.cobrand_info.screen_size",
"default_value": "0.0",
"description": "If specified and strictly positive but <= 1.0, makes logo scalable with screen by forcing its width to occupy a fixed fraction of the screeen. For instance, a value of .25 makes the given logo occupy 25% of the screen.",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].screen_size",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "screen_size",
"presence": "optional",
"short_label": "screen_size",
"typ": "double"
},
"end_snippet.cobrand_info.tie_point": {
"abstract_fieldpath": "end_snippet.cobrand_info.tie_point",
"default_value": "BOTTOM_LEFT",
"description": "Controls reference point in overlay.",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].tie_point",
"enum_vals": {
"BOTTOM_CENTER": 7,
"BOTTOM_LEFT": 6,
"BOTTOM_RIGHT": 8,
"MID_CENTER": 4,
"MID_LEFT": 3,
"MID_RIGHT": 5,
"TOP_CENTER": 1,
"TOP_LEFT": 0,
"TOP_RIGHT": 2
},
"js_validation_rule": {
"required": true
},
"name": "tie_point",
"presence": "optional",
"short_label": "tie_point",
"typ": "TiePoint"
},
"end_snippet.cobrand_info.x_coord.is_relative": {
"abstract_fieldpath": "end_snippet.cobrand_info.x_coord.is_relative",
"default_value": "false",
"description": "If true, the coordinate is relative to the screen.",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].x_coord.is_relative",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "is_relative",
"presence": "optional",
"short_label": "is_relative",
"typ": "bool"
},
"end_snippet.cobrand_info.x_coord.value": {
"abstract_fieldpath": "end_snippet.cobrand_info.x_coord.value",
"default_value": "0.0",
"description": "Coordinate value. Interpretation depends on is_relative (absolute or",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].x_coord.value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "value",
"presence": "required",
"short_label": "value",
"typ": "double"
},
"end_snippet.cobrand_info.y_coord.is_relative": {
"abstract_fieldpath": "end_snippet.cobrand_info.y_coord.is_relative",
"default_value": "false",
"description": "If true, the coordinate is relative to the screen.",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].y_coord.is_relative",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "is_relative",
"presence": "optional",
"short_label": "is_relative",
"typ": "bool"
},
"end_snippet.cobrand_info.y_coord.value": {
"abstract_fieldpath": "end_snippet.cobrand_info.y_coord.value",
"default_value": "0.0",
"description": "Coordinate value. Interpretation depends on is_relative (absolute or",
"empty_concrete_fieldpath": "end_snippet.cobrand_info.[].y_coord.value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "value",
"presence": "required",
"short_label": "value",
"typ": "double"
},
"end_snippet.default_web_page_intl_url:value": {
"abstract_fieldpath": "end_snippet.default_web_page_intl_url:value",
"default_value": null,
"description": "Default location of web page.",
"empty_concrete_fieldpath": "end_snippet.default_web_page_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "default_web_page_intl_url:value",
"presence": "optional",
"short_label": "default_web_page_intl_url",
"typ": "string"
},
"end_snippet.earth_intl_url:value": {
"abstract_fieldpath": "end_snippet.earth_intl_url:value",
"default_value": null,
"description": "Location of international page for earth.",
"empty_concrete_fieldpath": "end_snippet.earth_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "earth_intl_url:value",
"presence": "optional",
"short_label": "earth_intl_url",
"typ": "string"
},
"end_snippet.elevation_service_base_url": {
"abstract_fieldpath": "end_snippet.elevation_service_base_url",
"default_value": "",
"description": "Terrain elevation service URL. If empty, service will be unavailable.",
"empty_concrete_fieldpath": "end_snippet.elevation_service_base_url",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "elevation_service_base_url",
"presence": "optional",
"short_label": "elevation_service_base_url",
"typ": "string"
},
"end_snippet.hide_user_data": {
"abstract_fieldpath": "end_snippet.hide_user_data",
"default_value": "false",
"description": "If true, hides user license key in about dialog. Useful for Pro only, allows information to not be visible for shared license keys.",
"empty_concrete_fieldpath": "end_snippet.hide_user_data",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "hide_user_data",
"presence": "optional",
"short_label": "hide_user_data",
"typ": "bool"
},
"end_snippet.keyboard_shortcuts_url:value": {
"abstract_fieldpath": "end_snippet.keyboard_shortcuts_url:value",
"default_value": null,
"description": "URL for keyboard shortcuts page. If not specified, this URL is built from user_guide_intl_url as user_guide_intl_url + \"ug_keyboard.html\".",
"empty_concrete_fieldpath": "end_snippet.keyboard_shortcuts_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "keyboard_shortcuts_url:value",
"presence": "optional",
"short_label": "keyboard_shortcuts_url",
"typ": "string"
},
"end_snippet.model.compressed_negative_altitude_threshold": {
"abstract_fieldpath": "end_snippet.model.compressed_negative_altitude_threshold",
"default_value": null,
"description": "Threshold below which negative altitudes are compressed",
"empty_concrete_fieldpath": "end_snippet.model.compressed_negative_altitude_threshold",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "compressed_negative_altitude_threshold",
"presence": "optional",
"short_label": "compressed_negative_altitude_threshold",
"typ": "double"
},
"end_snippet.model.elevation_bias": {
"abstract_fieldpath": "end_snippet.model.elevation_bias",
"default_value": null,
"description": "Elevation bias",
"empty_concrete_fieldpath": "end_snippet.model.elevation_bias",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "elevation_bias",
"presence": "optional",
"short_label": "elevation_bias",
"typ": "double"
},
"end_snippet.model.flattening": {
"abstract_fieldpath": "end_snippet.model.flattening",
"default_value": "0.00335281066474748",
"description": "Planet flattening. Default value is 1.0/298.257223563 (from WGS84).",
"empty_concrete_fieldpath": "end_snippet.model.flattening",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "flattening",
"presence": "optional",
"short_label": "flattening",
"typ": "double"
},
"end_snippet.model.negative_altitude_exponent_bias": {
"abstract_fieldpath": "end_snippet.model.negative_altitude_exponent_bias",
"default_value": null,
"description": "Bias for negative altitude so that ocean tiles can be streamed to older clients",
"empty_concrete_fieldpath": "end_snippet.model.negative_altitude_exponent_bias",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "negative_altitude_exponent_bias",
"presence": "optional",
"short_label": "negative_altitude_exponent_bias",
"typ": "int32"
},
"end_snippet.model.radius": {
"abstract_fieldpath": "end_snippet.model.radius",
"default_value": "6378.13700",
"description": "Mean planet radius. Default value is the WGS84 model for earth.",
"empty_concrete_fieldpath": "end_snippet.model.radius",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "radius",
"presence": "optional",
"short_label": "radius",
"typ": "double"
},
"end_snippet.privacy_policy_url:value": {
"abstract_fieldpath": "end_snippet.privacy_policy_url:value",
"default_value": null,
"description": "URL for the privacy policy.",
"empty_concrete_fieldpath": "end_snippet.privacy_policy_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "privacy_policy_url:value",
"presence": "optional",
"short_label": "privacy_policy_url",
"typ": "string"
},
"end_snippet.release_notes_url:value": {
"abstract_fieldpath": "end_snippet.release_notes_url:value",
"default_value": null,
"description": "URL for release notes page.",
"empty_concrete_fieldpath": "end_snippet.release_notes_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "release_notes_url:value",
"presence": "optional",
"short_label": "release_notes_url",
"typ": "string"
},
"end_snippet.reverse_geocoder_protocol_version": {
"abstract_fieldpath": "end_snippet.reverse_geocoder_protocol_version",
"default_value": "3",
"description": "Reverse geocoder protocol version. Default is 3 which is the protocol supported by newer clients.",
"empty_concrete_fieldpath": "end_snippet.reverse_geocoder_protocol_version",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "reverse_geocoder_protocol_version",
"presence": "optional",
"short_label": "reverse_geocoder_protocol_version",
"typ": "int32"
},
"end_snippet.reverse_geocoder_url:value": {
"abstract_fieldpath": "end_snippet.reverse_geocoder_url:value",
"default_value": null,
"description": "Reverse geocoder server URL",
"empty_concrete_fieldpath": "end_snippet.reverse_geocoder_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "reverse_geocoder_url:value",
"presence": "optional",
"short_label": "reverse_geocoder_url",
"typ": "string"
},
"end_snippet.show_signin_button": {
"abstract_fieldpath": "end_snippet.show_signin_button",
"default_value": null,
"description": "If true, shows the signin button in the upper right corner.",
"empty_concrete_fieldpath": "end_snippet.show_signin_button",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "show_signin_button",
"presence": "optional",
"short_label": "show_signin_button",
"typ": "bool"
},
"end_snippet.startup_tips_intl_url:value": {
"abstract_fieldpath": "end_snippet.startup_tips_intl_url:value",
"default_value": null,
"description": "URL from which to load startup tips in Earth 7.0 and higher.",
"empty_concrete_fieldpath": "end_snippet.startup_tips_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "startup_tips_intl_url:value",
"presence": "optional",
"short_label": "startup_tips_intl_url",
"typ": "string"
},
"end_snippet.support_answer_intl_url:value": {
"abstract_fieldpath": "end_snippet.support_answer_intl_url:value",
"default_value": null,
"description": "Url to support answer.",
"empty_concrete_fieldpath": "end_snippet.support_answer_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "support_answer_intl_url:value",
"presence": "optional",
"short_label": "support_answer_intl_url",
"typ": "string"
},
"end_snippet.support_center_intl_url:value": {
"abstract_fieldpath": "end_snippet.support_center_intl_url:value",
"default_value": null,
"description": "Url to support center.",
"empty_concrete_fieldpath": "end_snippet.support_center_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "support_center_intl_url:value",
"presence": "optional",
"short_label": "support_center_intl_url",
"typ": "string"
},
"end_snippet.support_request_intl_url:value": {
"abstract_fieldpath": "end_snippet.support_request_intl_url:value",
"default_value": null,
"description": "Url to support pages.",
"empty_concrete_fieldpath": "end_snippet.support_request_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "support_request_intl_url:value",
"presence": "optional",
"short_label": "support_request_intl_url",
"typ": "string"
},
"end_snippet.support_topic_intl_url:value": {
"abstract_fieldpath": "end_snippet.support_topic_intl_url:value",
"default_value": null,
"description": "Url to support topics used by certain diagnostic messages.",
"empty_concrete_fieldpath": "end_snippet.support_topic_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "support_topic_intl_url:value",
"presence": "optional",
"short_label": "support_topic_intl_url",
"typ": "string"
},
"end_snippet.swoop_parameters.start_dist_in_meters": {
"abstract_fieldpath": "end_snippet.swoop_parameters.start_dist_in_meters",
"default_value": null,
"description": "Controls how far from a target swooping should start.",
"empty_concrete_fieldpath": "end_snippet.swoop_parameters.start_dist_in_meters",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "start_dist_in_meters",
"presence": "optional",
"short_label": "start_dist_in_meters",
"typ": "double"
},
"end_snippet.tutorial_url:value": {
"abstract_fieldpath": "end_snippet.tutorial_url:value",
"default_value": null,
"description": "URL for tutorial page. If not specified, this URL is built from user_guide_intl_url as user_guide_intl_url + \"tutorials/index.html\".",
"empty_concrete_fieldpath": "end_snippet.tutorial_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "tutorial_url:value",
"presence": "optional",
"short_label": "tutorial_url",
"typ": "string"
},
"end_snippet.use_ge_logo": {
"abstract_fieldpath": "end_snippet.use_ge_logo",
"default_value": "true",
"description": "If false, hides the Google logo.",
"empty_concrete_fieldpath": "end_snippet.use_ge_logo",
"enum_vals": null,
"js_validation_rule": {
"required": false
},
"name": "use_ge_logo",
"presence": "optional",
"short_label": "use_ge_logo",
"typ": "bool"
},
"end_snippet.user_guide_intl_url:value": {
"abstract_fieldpath": "end_snippet.user_guide_intl_url:value",
"default_value": null,
"description": "Url to user guide.",
"empty_concrete_fieldpath": "end_snippet.user_guide_intl_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "user_guide_intl_url:value",
"presence": "optional",
"short_label": "user_guide_intl_url",
"typ": "string"
},
"end_snippet.valid_database.database_name:value": {
"abstract_fieldpath": "end_snippet.valid_database.database_name:value",
"default_value": null,
"description": "Human-readable name of database (such as \"Primary Database\" or \"Digital Globe Database\")",
"empty_concrete_fieldpath": "end_snippet.valid_database.[].database_name:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "database_name:value",
"presence": "optional",
"short_label": "database_name",
"typ": "string"
},
"end_snippet.valid_database.database_url": {
"abstract_fieldpath": "end_snippet.valid_database.database_url",
"default_value": null,
"description": "URL of server. This can include a path and query, and must be a well-formed, absolute URL.",
"empty_concrete_fieldpath": "end_snippet.valid_database.[].database_url",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "database_url",
"presence": "required",
"short_label": "database_url",
"typ": "string"
},
"end_snippet.search_config.error_page_url:value": {
"abstract_fieldpath": "end_snippet.search_config.error_page_url:value",
"default_value": "about:blank",
"description": "URL of a page that will be displayed if a network error or other local error occurs while performing a search. This might be an error for a local geocode while in offline mode, a connection error while trying to connect to MFE, or some other error where we can't get an error message from the server. (Obviously this page should be cached locally, or it's not terribly useful.) The URL should be fully encoded, and can use $[hl] and friends if necessary.",
"empty_concrete_fieldpath": "end_snippet.search_config.error_page_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "error_page_url:value",
"presence": "optional",
"short_label": "error_page_url",
"typ": "string"
},
"end_snippet.search_config.kml_render_url:value": {
"abstract_fieldpath": "end_snippet.search_config.kml_render_url:value",
"default_value": "/earth/client/kmlrender/index_$[hl].html",
"description": "URL of a page that will be shown when KML is rendered in the search panel. This page should have JavaScript that reads the KML from the environment and renders it as HTML, but should NOT perform onebox or searchlet searches. The URL should be fully encoded, and can use $[hl] and friends if necessary.",
"empty_concrete_fieldpath": "end_snippet.search_config.kml_render_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "kml_render_url:value",
"presence": "optional",
"short_label": "kml_render_url",
"typ": "string"
},
"end_snippet.search_config.kml_search_url:value": {
"abstract_fieldpath": "end_snippet.search_config.kml_search_url:value",
"default_value": "/earth/client/kmlrender/index_$[hl].html",
"description": "URL of a page that will be shown when a KML search is performed. This page should have JavaScript that reads the KML from the environment and renders it as HTML, and also performs onebox and searchlet searches if applicable. The URL should be fully encoded, and can use $[hl] and friends if necessary.",
"empty_concrete_fieldpath": "end_snippet.search_config.kml_search_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "kml_search_url:value",
"presence": "optional",
"short_label": "kml_search_url",
"typ": "string"
},
"end_snippet.search_config.search_history_url:value": {
"abstract_fieldpath": "end_snippet.search_config.search_history_url:value",
"default_value": "http://www.google.com/earth/client/search/history_$[hl].html",
"description": "URL of a page that will be shown when the search history is requested. This page should have JavaScript that reads the search history from the client and renders it as HTML.",
"empty_concrete_fieldpath": "end_snippet.search_config.search_history_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "search_history_url:value",
"presence": "optional",
"short_label": "search_history_url",
"typ": "string"
},
"end_snippet.google_maps_url:value": {
"abstract_fieldpath": "end_snippet.google_maps_url:value",
"default_value": "",
"description": "URL for Google Maps, for features like 'View in Maps'.",
"empty_concrete_fieldpath": "end_snippet.google_maps_url:value",
"enum_vals": null,
"js_validation_rule": {
"required": true
},
"name": "google_maps_url:value",
"presence": "optional",
"short_label": "google_maps_url",
"typ": "string"
}
}
"""
| tst-ahernandez/earthenterprise | earth_enterprise/src/server/wsgi/serve/snippets/data/metainfo_by_fieldpath.py | Python | apache-2.0 | 24,676 |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" _importer.py
Merge _yang_ns for subpackage to a single _yang_ns at runtime.
"""
import importlib
import pkgutil
from ydk import models
class YangNs(object):
def __init__(self, d):
self.__dict__ = d
_yang_ns_dict = {}
exempt_keys = set(['__builtins__', '__doc__', '__file__',
'__name__', '__package__'])
try:
_yang_ns = importlib.import_module('ydk.models._yang_ns')
except ImportError:
for (importer, name, ispkg) in pkgutil.iter_modules(models.__path__):
if ispkg:
try:
mod_yang_ns = importlib.import_module('ydk.models.%s._yang_ns' % name)
except ImportError:
continue
keys = set(mod_yang_ns.__dict__) - exempt_keys
for key in keys:
if key not in _yang_ns_dict:
_yang_ns_dict[key] = mod_yang_ns.__dict__[key]
else:
if isinstance(_yang_ns_dict[key], dict):
_yang_ns_dict[key].update(mod_yang_ns.__dict__[key])
else:
# shadow old value
_yang_ns_dict[key] = mod_yang_ns.__dict__[key]
_yang_ns = YangNs(_yang_ns_dict)
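# Illustrative usage (the attribute name below is hypothetical): consumers
# import this module and read the merged mappings from the single _yang_ns
# object, regardless of how many ydk.models.* subpackages are installed:
#
#   from ydk.providers._importer import _yang_ns
#   lookup = getattr(_yang_ns, 'ENTITY_LOOKUP', {})  # hypothetical attribute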
| 111pontes/ydk-py | core/ydk/providers/_importer.py | Python | apache-2.0 | 1,938 |
"""Update a task in maniphest.
you can use the 'task id' output from the 'arcyon task-create' command as input
to this command.
usage examples:
update task '99' with a new title, only show id:
$ arcyon task-update 99 -t 'title' --format-id
99
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_taskupdate
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import phlcon_maniphest
import phlcon_project
import phlcon_user
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
# make a list of priority names in increasing order of importance
priority_name_list = phlcon_maniphest.PRIORITIES.keys()
priority_name_list.sort(
key=lambda x: phlcon_maniphest.PRIORITIES[x])
priorities = parser.add_argument_group(
'optional priority arguments',
'use any of ' + textwrap.fill(
str(priority_name_list)))
output_group = parser.add_argument_group(
'Output format arguments',
'Mutually exclusive, defaults to "--format-summary"')
output = output_group.add_mutually_exclusive_group()
opt = parser.add_argument_group(
'Optional task arguments',
'You can supply these later via the web interface if you wish')
priorities.add_argument(
'--priority',
'-p',
choices=priority_name_list,
metavar="PRIORITY",
default=None,
type=str,
help="the priority or importance of the task")
parser.add_argument(
'id',
metavar='INT',
help='the id of the task',
type=str)
parser.add_argument(
'--title',
'-t',
metavar='STRING',
help='the short title of the task',
default=None,
type=str)
opt.add_argument(
'--description',
'-d',
metavar='STRING',
help='the long description of the task',
default=None,
type=str)
opt.add_argument(
'--owner',
'-o',
metavar='USER',
help='the username of the owner',
type=str)
opt.add_argument(
'--ccs',
'-c',
nargs="*",
metavar='USER',
help='a list of usernames to cc on the task',
type=str)
opt.add_argument(
'--projects',
nargs="*",
metavar='PROJECT',
default=[],
help='a list of project names to add the task to',
type=str)
opt.add_argument(
'--comment',
'-m',
metavar='STRING',
help='an optional comment to make on the task',
default=None,
type=str)
output.add_argument(
'--format-summary',
action='store_true',
help='will print a human-readable summary of the result.')
output.add_argument(
'--format-id',
action='store_true',
help='will print just the id of the new task, for scripting.')
output.add_argument(
'--format-url',
action='store_true',
help='will print just the url of the new task, for scripting.')
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
if args.title and not args.title.strip():
print('you must supply a non-empty title', file=sys.stderr)
return 1
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
    # update_task expects an integer priority
priority = None
if args.priority is not None:
priority = phlcon_maniphest.PRIORITIES[args.priority]
# conduit expects PHIDs not plain usernames
user_phids = phlcon_user.UserPhidCache(conduit)
if args.owner:
user_phids.add_hint(args.owner)
if args.ccs:
user_phids.add_hint_list(args.ccs)
owner = user_phids.get_phid(args.owner) if args.owner else None
ccs = [user_phids.get_phid(u) for u in args.ccs] if args.ccs else None
# conduit expects PHIDs not plain project names
projects = None
if args.projects:
project_to_phid = phlcon_project.make_project_to_phid_dict(conduit)
projects = [project_to_phid[p] for p in args.projects]
result = phlcon_maniphest.update_task(
conduit,
args.id,
args.title,
args.description,
priority,
owner,
ccs,
projects,
args.comment)
if args.format_id:
print(result.id)
elif args.format_url:
print(result.uri)
else: # args.format_summary:
message = (
"Updated task '{task_id}', you can view it at this URL:\n"
" {url}"
).format(
task_id=result.id,
url=result.uri)
print(message)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| kjedruczyk/phabricator-tools | py/aon/aoncmd_taskupdate.py | Python | apache-2.0 | 5,958 |
import logging
import terminal
INFO = logging.INFO
# between info and debug
VERBOSE = (logging.INFO + logging.DEBUG) // 2
DEBUG = logging.DEBUG
log = logging.getLogger('rdopkg')
log.setLevel(logging.INFO)
if len(log.handlers) < 1:
formatter = logging.Formatter(fmt='%(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
class LogTerminal(terminal.Terminal):
@property
def warn(self):
return self.yellow
@property
def important(self):
return self.yellow_bold
@property
def error(self):
return self.red
@property
def good(self):
return self.green
@property
def cmd(self):
return self.cyan
term = LogTerminal()
def set_colors(colors):
global term
if colors == 'yes':
if not terminal.COLOR_TERMINAL:
return False
term = LogTerminal(force_styling=True)
return True
elif colors == 'no':
if not terminal.COLOR_TERMINAL:
return True
term = LogTerminal(force_styling=None)
return True
elif colors == 'auto':
term = LogTerminal()
return True
return False
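# Minimal usage sketch:
#   from rdopkg.utils import log
#   log.set_colors('auto')
#   log.info("plain message")
#   log.success("shown in green when the terminal supports color")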
def error(*args, **kwargs):
if args:
largs = list(args)
largs[0] = term.error(args[0])
args = tuple(largs)
log.error(*args, **kwargs)
def warn(*args, **kwargs):
if args:
largs = list(args)
largs[0] = term.warn(args[0])
args = tuple(largs)
log.warning(*args, **kwargs)
def success(*args, **kwargs):
if args:
largs = list(args)
largs[0] = term.good(args[0])
args = tuple(largs)
log.info(*args, **kwargs)
def info(*args, **kwargs):
log.info(*args, **kwargs)
def verbose(*args, **kwargs):
log.log(VERBOSE, *args, **kwargs)
def debug(*args, **kwargs):
log.debug(*args, **kwargs)
def command(*args, **kwargs):
log.info(*args, **kwargs)
| ktdreyer/rdopkg | rdopkg/utils/log.py | Python | apache-2.0 | 1,968 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor
class ConfigVlanCommandProcessor(BaseCommandProcessor):
def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
super(ConfigVlanCommandProcessor, self).init(switch_configuration, terminal_controller, logger, piping_processor)
self.vlan = args[0]
def get_prompt(self):
return self.switch_configuration.name + "(config-vlan)#"
def do_name(self, *args):
        self.vlan.name = args[0][:32]
def do_exit(self):
self.is_done = True
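# Illustrative behaviour (sketch): for a processor created with vlan v,
#   do_name('a-name-longer-than-thirty-two-characters-x')
# stores only the first 32 characters in v.name, while do_exit() sets
# is_done to signal that this configuration sub-mode is finished.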
| internap/fake-switches | fake_switches/cisco/command_processor/config_vlan.py | Python | apache-2.0 | 1,182 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS files(s) to the
standard output.
Example usage of ``fitsinfo``:
1. Print a summary of the HDUs in a FITS file::
$ fitsinfo filename.fits
Filename: filename.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 138 ()
1 SCI ImageHDU 61 (800, 800) int16
2 SCI ImageHDU 61 (800, 800) int16
3 SCI ImageHDU 61 (800, 800) int16
4 SCI ImageHDU 61 (800, 800) int16
2. Print a summary of HDUs of all the FITS files in the current directory::
$ fitsinfo *.fits
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import astropy.io.fits as fits
from astropy import log
def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
"""
try:
fits.info(filename)
except IOError as e:
log.error(str(e))
return
def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
        description=('Print a summary of the HDUs in one or more FITS files.'))
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files. '
'Wildcards are supported.')
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename)
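# Editor's note: a hypothetical entry-point guard, added so the module can be
# run directly; the original file defines `main` for use as a console-script
# entry point and ships no guard of its own.
if __name__ == '__main__':
    main()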
| joergdietrich/astropy | astropy/io/fits/scripts/fitsinfo.py | Python | bsd-3-clause | 1,784 |
"""
Tests of neo.io.igorproio
"""
import unittest
try:
import igor
HAVE_IGOR = True
except ImportError:
HAVE_IGOR = False
from neo.io.igorproio import IgorIO
from neo.test.iotest.common_io_test import BaseTestIO
@unittest.skipUnless(HAVE_IGOR, "requires igor")
class TestIgorIO(BaseTestIO, unittest.TestCase):
ioclass = IgorIO
entities_to_download = [
'igor'
]
entities_to_test = [
'igor/mac-version2.ibw',
'igor/win-version2.ibw'
]
if __name__ == "__main__":
unittest.main()
| samuelgarcia/python-neo | neo/test/iotest/test_igorio.py | Python | bsd-3-clause | 543 |
import numpy as np
from numpy.random import randn
from numpy.testing import assert_almost_equal, dec
from dipy.reconst.vec_val_sum import vec_val_vect
def make_vecs_vals(shape):
    return randn(*shape), randn(*(shape[:-2] + shape[-1:]))
try:
np.einsum
except AttributeError:
with_einsum = dec.skipif(True, "Need einsum for benchmark")
else:
def with_einsum(f): return f
@with_einsum
def test_vec_val_vect():
for shape0 in ((10,), (100,), (10, 12), (12, 10, 5)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape = shape0 + shape1
evecs, evals = make_vecs_vals(shape)
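            # The einsum spec '...ij,...j,...kj->...ik' computes
            # Q . diag(L) . Q^T over any leading batch dimensions.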
res1 = np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
def dumb_sum(vecs, vals):
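    # Reference implementation: an explicit per-item loop forming
    # Q . diag(L) . Q^T, used to cross-check the vectorized routine.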
N, rows, cols = vecs.shape
res2 = np.zeros((N, rows, rows))
for i in range(N):
Q = vecs[i]
L = vals[i]
res2[i] = np.dot(Q, np.dot(np.diag(L), Q.T))
return res2
def test_vec_val_vect_dumber():
for shape0 in ((10,), (100,)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape = shape0 + shape1
evecs, evals = make_vecs_vals(shape)
res1 = dumb_sum(evecs, evals)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
| nilgoyyou/dipy | dipy/reconst/tests/test_vec_val_vect.py | Python | bsd-3-clause | 1,302 |
"""
Sphinx plugin to run example scripts and create a gallery page.
Lightly modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
from seaborn.external import six
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
if six.PY3:
# Python 3 has no execfile
def execfile(filename, globals=None, locals=None):
with open(filename, "rb") as fp:
six.exec_(compile(fp.read(), filename, 'exec'), globals, locals)
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. raw:: html
<div class="col-md-9">
.. literalinclude:: {fname}
:lines: {end_line}-
.. raw:: html
</div>
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=275, height=275,
cx=0.5, cy=0.5, border=4):
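    """Crop a width x height window from `infile`, centred at the relative
    position (cx, cy), draw a black `border`-pixel frame, and save the
    result to `thumbfile`."""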
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iter = lines.__iter__()
tokens = tokenize.generate_tokens(lambda: next(line_iter))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
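    # Pad the collected entries by repetition so the banner data always
    # holds at least 10 items.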
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
| phobson/seaborn | doc/sphinxext/gallery_generator.py | Python | bsd-3-clause | 10,385 |
from functools import update_wrapper
from django.http import Http404, HttpResponseRedirect
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import logout as auth_logout, REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin', app_name='admin'):
        self._registry = {}  # maps each registered model class to its ModelAdmin instance
self.name = name
self.app_name = app_name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
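        Example (illustrative only; ``Book`` is a hypothetical model)::

            site.register(Book, list_display=['title', 'author'])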
"""
if not admin_class:
admin_class = ModelAdmin
# Don't import the humongous validation code unless required
if admin_class and settings.DEBUG:
from django.contrib.admin.validation import validate
else:
validate = lambda model, adminclass: None
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Validate (which might be a no-op)
validate(admin_class, model)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if LOGIN_FORM_KEY in request.POST and request.user.is_authenticated():
auth_logout(request)
if not self.has_permission(request):
if request.path == reverse('admin:logout',
current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut),
name='view_on_site'),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in six.iteritems(self._registry):
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_label': app_label,
'app_url': reverse('admin:app_list', kwargs={'app_label': app_label}, current_app=self.name),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(six.itervalues(app_dict))
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
}
context.update(extra_context or {})
return TemplateResponse(request, self.index_template or
'admin/index.html', context,
current_app=self.name)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
                            app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_label': app_label,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
}
context.update(extra_context or {})
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context, current_app=self.name)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
| postrational/django | django/contrib/admin/sites.py | Python | bsd-3-clause | 18,932 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.mocktool import MockTool
class MockSheriffBot(object):
name = "mock-sheriff-bot"
watchers = [
"[email protected]",
]
def run_webkit_patch(self, args):
return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n"
class SheriffTest(unittest.TestCase):
def test_post_blame_comment_on_bug(self):
def run():
sheriff = Sheriff(MockTool(), MockSheriffBot())
builders = [
Builder("Foo", None),
Builder("Bar", None),
]
commit_info = Mock()
commit_info.bug_id = lambda: None
commit_info.revision = lambda: 4321
# Should do nothing with no bug_id
sheriff.post_blame_comment_on_bug(commit_info, builders, [])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
# Should try to post a comment to the bug, but MockTool.bugs does nothing.
commit_info.bug_id = lambda: 1234
sheriff.post_blame_comment_on_bug(commit_info, builders, [])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
expected_stderr = u"""MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['[email protected]']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
mock-test-2
--- End comment ---
"""
OutputCapture().assert_outputs(self, run, expected_stderr=expected_stderr)
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py | Python | bsd-3-clause | 3,776 |
#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common import lru_cache
class LRUCacheTest(unittest.TestCase):
def setUp(self):
self.lru = lru_cache.LRUCache(3)
self.lru['key_1'] = 'item_1'
self.lru['key_2'] = 'item_2'
self.lru['key_3'] = 'item_3'
self.lru2 = lru_cache.LRUCache(1)
self.lru2['key_1'] = 'item_1'
def test_items(self):
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_put(self):
self.lru['key_4'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_4', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_update(self):
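        # Access key_1 so it becomes most-recently-used before inserting.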
self.lru['key_1']
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_5', 'item_5')]))
def test_keys(self):
self.assertEqual(set(self.lru.keys()), set(['key_1', 'key_2', 'key_3']))
def test_delete(self):
del self.lru['key_1']
self.assertFalse('key_1' in self.lru)
def test_contain(self):
self.assertTrue('key_1' in self.lru)
self.assertFalse('key_4' in self.lru)
def test_values(self):
self.assertEqual(set(self.lru.values()), set(['item_1', 'item_2', 'item_3']))
def test_len(self):
self.assertEqual(len(self.lru), 3)
def test_size_one_pop(self):
self.lru2['key_2'] = 'item_2'
self.assertEqual(self.lru2.keys(), ['key_2'])
def test_size_one_delete(self):
del self.lru2['key_1']
self.assertFalse('key_1' in self.lru2)
def test_pop_error(self):
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
del self.lru2['key_1']
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
def test_get_middle_item(self):
self.lru['key_2']
self.lru['key_4'] = 'item_4'
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.keys()), set(['key_2', 'key_4', 'key_5']))
def test_set_again(self):
self.lru['key_1'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
if __name__ == "__main__":
unittest.main()
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/lru_cache_unittest.py | Python | bsd-3-clause | 3,651 |
import re
import sublime
import sublime_plugin
class IndentListItemCommand(sublime_plugin.TextCommand):
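    # Matches unordered bullets (-, +, *) or ordered markers -- digits, '#',
    # a single letter a-y/A-Y, or a Roman numeral, optionally preceded by '('
    # and followed by ')' or '.', e.g. "1.", "(a)", "iv)".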
bullet_pattern = r'([-+*]|([(]?(\d+|#|[a-y]|[A-Y]|[MDCLXVImdclxvi]+))([).]))'
bullet_pattern_re = re.compile(bullet_pattern)
line_pattern_re = re.compile(r'^\s*' + bullet_pattern)
spaces_re = re.compile(r'^\s*')
def run(self, edit, reverse=False):
for region in self.view.sel():
if region.a != region.b:
continue
line = self.view.line(region)
line_content = self.view.substr(line)
new_line = line_content
m = self.line_pattern_re.match(new_line)
if not m:
return
# Determine how to indent (tab or spaces)
tab_str = self.view.settings().get('tab_size', 4) * ' '
sep_str = ' ' if m.group(4) else ''
prev_line = self.view.line(sublime.Region(line.begin() - 1, line.begin() - 1))
prev_line_content = self.view.substr(prev_line)
prev_prev_line = self.view.line(sublime.Region(prev_line.begin() - 1, prev_line.begin() - 1))
prev_prev_line_content = self.view.substr(prev_prev_line)
if not reverse:
# Do the indentation
new_line = self.bullet_pattern_re.sub(tab_str + sep_str + r'\1', new_line)
# Insert the new item
if prev_line_content:
new_line = '\n' + new_line
else:
if not new_line.startswith(tab_str):
continue
# Do the unindentation
new_line = re.sub(tab_str + sep_str + self.bullet_pattern, r'\1', new_line)
# Insert the new item
if prev_line_content:
new_line = '\n' + new_line
else:
prev_spaces = self.spaces_re.match(prev_prev_line_content).group(0)
spaces = self.spaces_re.match(new_line).group(0)
if prev_spaces == spaces:
line = sublime.Region(line.begin() - 1, line.end())
endings = ['.', ')']
# Transform the bullet to the next/previous bullet type
if self.view.settings().get('list_indent_auto_switch_bullet', True):
bullets = self.view.settings().get('list_indent_bullets', ['*', '-', '+'])
def change_bullet(m):
bullet = m.group(1)
try:
return bullets[(bullets.index(bullet) + (1 if not reverse else -1)) % len(bullets)]
except ValueError:
pass
n = m.group(2)
ending = endings[(endings.index(m.group(4)) + (1 if not reverse else -1)) % len(endings)]
if n.isdigit():
return '${1:a}' + ending
elif n != '#':
return '${1:0}' + ending
return m.group(2) + ending
new_line = self.bullet_pattern_re.sub(change_bullet, new_line)
self.view.replace(edit, line, '')
self.view.run_command('insert_snippet', {'contents': new_line})
def is_enabled(self):
return bool(self.view.score_selector(self.view.sel()[0].a, 'text.restructuredtext'))
| Kronuz/sublime-rst-completion | indent_list_item.py | Python | bsd-3-clause | 3,382 |
"""Tools for working with virtualenv environments"""
import os
import sys
import subprocess
from pip.exceptions import BadCommand
from pip.log import logger
def restart_in_venv(venv, base, site_packages, args):
"""
Restart this script using the interpreter in the given virtual environment
"""
if base and not os.path.isabs(venv) and not venv.startswith('~'):
base = os.path.expanduser(base)
# ensure we have an abs basepath at this point:
# a relative one makes no sense (or does it?)
if os.path.isabs(base):
venv = os.path.join(base, venv)
if venv.startswith('~'):
venv = os.path.expanduser(venv)
if not os.path.exists(venv):
try:
import virtualenv
except ImportError:
print 'The virtual environment does not exist: %s' % venv
print 'and virtualenv is not installed, so a new environment cannot be created'
sys.exit(3)
print 'Creating new virtualenv environment in %s' % venv
virtualenv.logger = logger
logger.indent += 2
virtualenv.create_environment(venv, site_packages=site_packages)
if sys.platform == 'win32':
python = os.path.join(venv, 'Scripts', 'python.exe')
# check for bin directory which is used in buildouts
if not os.path.exists(python):
python = os.path.join(venv, 'bin', 'python.exe')
else:
python = os.path.join(venv, 'bin', 'python')
if not os.path.exists(python):
python = venv
if not os.path.exists(python):
raise BadCommand('Cannot find virtual environment interpreter at %s' % python)
base = os.path.dirname(os.path.dirname(python))
file = os.path.join(os.path.dirname(__file__), 'runner.py')
if file.endswith('.pyc'):
file = file[:-1]
proc = subprocess.Popen(
[python, file] + args + [base, '___VENV_RESTART___'])
proc.wait()
sys.exit(proc.returncode)
| BadDNA/anolis | web/env/lib/python2.6/site-packages/pip-0.7.2-py2.6.egg/pip/venv.py | Python | bsd-3-clause | 1,972 |
# -*- coding: utf-8 -*-
from uuid import uuid4
from gluon import current
def rlpcm_person_anonymize():
""" Rules to anonymize a case file """
auth = current.auth
s3db = current.s3db
ANONYMOUS = "-"
# Standard anonymizers
from s3db.pr import pr_address_anonymise as anonymous_address, \
pr_person_obscure_dob as obscure_dob
# Helper to produce an anonymous ID (pe_label)
anonymous_id = lambda record_id, f, v: "NN%s" % uuid4().hex[-8:].upper()
anonymous_code = lambda record_id, f, v: uuid4().hex
# Case Activity Default Closure
activity_closed = s3db.br_case_activity_default_status(closing=True)
# General rule for attachments
documents = ("doc_document", {
"key": "doc_id",
"match": "doc_id",
"fields": {"name": ("set", ANONYMOUS),
"file": "remove",
"url": "remove",
"comments": "remove",
},
"delete": True,
})
# Rule for direct offers (from the offerer perspective)
direct_offers = ("br_direct_offer", {
"key": "offer_id",
"match": "id",
"delete": True,
})
# Rules for user accounts
account = ("auth_user", {
"key": "id",
"match": "user_id",
"fields": {"id": auth.s3_anonymise_roles,
"first_name": ("set", "-"),
"last_name": "remove",
"email": anonymous_code,
"organisation_id": "remove",
"password": auth.s3_anonymise_password,
"deleted": ("set", True),
},
})
# Rules
rules = [
# Rules to remove PID from person record and case file
{"name": "default",
"title": "Names, IDs, Reference Numbers, Contact Information, Addresses",
"fields": {"first_name": ("set", ANONYMOUS),
"last_name": ("set", ANONYMOUS),
"pe_label": anonymous_id,
"date_of_birth": obscure_dob,
"comments": "remove",
},
"cascade": [("br_case", {
"key": "person_id",
"match": "id",
"fields": {"comments": "remove",
},
"cascade": [documents,
],
}),
("pr_contact", {
"key": "pe_id",
"match": "pe_id",
"fields": {"contact_description": "remove",
"value": ("set", ""),
"comments": "remove",
},
"delete": True,
}),
("pr_contact_emergency", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"relationship": "remove",
"phone": "remove",
"comments": "remove",
},
"delete": True,
}),
("pr_address", {
"key": "pe_id",
"match": "pe_id",
"fields": {"location_id": anonymous_address,
"comments": "remove",
},
}),
("pr_person_details", {
"key": "person_id",
"match": "id",
"fields": {"education": "remove",
"occupation": "remove",
},
}),
("pr_image", {
"key": "pe_id",
"match": "pe_id",
"fields": {"image": "remove",
"url": "remove",
"description": "remove",
},
"delete": True,
}),
("hrm_human_resource", {
"key": "person_id",
"match": "id",
"fields": {"status": ("set", 2),
"site_id": "remove",
"comments": "remove",
},
}),
],
},
# Rules to remove PID from activities and offers
{"name": "activities",
"title": "Needs Reports and Offers of Assistance",
"cascade": [("br_case_activity", {
"key": "person_id",
"match": "id",
"fields": {"location_id": anonymous_address,
"subject": ("set", ANONYMOUS),
"need_details": "remove",
"activity_details": "remove",
"outcome": "remove",
"comments": "remove",
"status_id": ("set", activity_closed),
},
"cascade": [documents,
],
}),
("br_assistance_offer", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"description": "remove",
"capacity": "remove",
"location_id": anonymous_address,
"contact_name": "remove",
"contact_phone": "remove",
"contact_email": "remove",
"availability": ("set", "RTD"),
"comments": "remove",
},
"cascade": [direct_offers,
],
}),
],
},
# Rules to unlink and remove user account
{"name": "account",
"title": "User Account",
"cascade": [("pr_person_user", {
"key": "pe_id",
"match": "pe_id",
"cascade": [account,
],
"delete": True,
}),
],
},
]
return rules
| flavour/eden | modules/templates/BRCMS/RLP/anonymize.py | Python | mit | 7,359 |
"""
Strictly internal utilities.
"""
from __future__ import absolute_import, division, print_function
from twisted.web.client import HTTPConnectionPool
def default_reactor(reactor):
"""
Return the specified reactor or the default.
"""
if reactor is None:
from twisted.internet import reactor
return reactor
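# A one-element list serves as a mutable cell, letting the getter/setter below
# swap the shared pool without a module-level `global` statement.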
_global_pool = [None]
def get_global_pool():
return _global_pool[0]
def set_global_pool(pool):
_global_pool[0] = pool
def default_pool(reactor, pool, persistent):
"""
    Return the specified pool or a pool with the specified reactor and
persistence.
"""
reactor = default_reactor(reactor)
if pool is not None:
return pool
if persistent is False:
return HTTPConnectionPool(reactor, persistent=persistent)
if get_global_pool() is None:
set_global_pool(HTTPConnectionPool(reactor, persistent=True))
return get_global_pool()
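# Editor's note: a hypothetical illustration of the selection rules above,
# not part of the original file:
#
#   default_pool(reactor, my_pool, True)   -> my_pool (an explicit pool wins)
#   default_pool(reactor, None, False)     -> a fresh non-persistent pool
#   default_pool(reactor, None, True)      -> the shared global persistent pool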
| glyph/treq | treq/_utils.py | Python | mit | 940 |
from __future__ import absolute_import
from django.core.exceptions import PermissionDenied
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
import allauth.app_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import get_next_redirect_url, setup_user_email
from allauth.utils import (get_user_model, serialize_instance,
deserialize_instance)
from . import app_settings
from . import providers
from .fields import JSONField
from ..utils import get_request_param
class SocialAppManager(models.Manager):
def get_current(self, provider):
site = Site.objects.get_current()
return self.get(sites__id=site.id,
provider=provider)
@python_2_unicode_compatible
class SocialApp(models.Model):
objects = SocialAppManager()
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
name = models.CharField(verbose_name=_('name'),
max_length=40)
client_id = models.CharField(verbose_name=_('client id'),
max_length=100,
help_text=_('App ID, or consumer key'))
secret = models.CharField(verbose_name=_('secret key'),
max_length=100,
help_text=_('API secret, client secret, or'
' consumer secret'))
key = models.CharField(verbose_name=_('key'),
max_length=100,
blank=True,
help_text=_('Key'))
# Most apps can be used across multiple domains, therefore we use
# a ManyToManyField. Note that Facebook requires an app per domain
# (unless the domains share a common base name).
# blank=True allows for disabling apps without removing them
sites = models.ManyToManyField(Site, blank=True)
class Meta:
verbose_name = _('social application')
verbose_name_plural = _('social applications')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SocialAccount(models.Model):
user = models.ForeignKey(allauth.app_settings.USER_MODEL)
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
# Just in case you're wondering if an OpenID identity URL is going
# to fit in a 'uid':
#
# Ideally, URLField(max_length=1024, unique=True) would be used
# for identity. However, MySQL has a max_length limitation of 255
# for URLField. How about models.TextField(unique=True) then?
# Well, that won't work either for MySQL due to another bug[1]. So
# the only way out would be to drop the unique constraint, or
# switch to shorter identity URLs. Opted for the latter, as [2]
# suggests that identity URLs are supposed to be short anyway, at
# least for the old spec.
#
# [1] http://code.djangoproject.com/ticket/2495.
# [2] http://openid.net/specs/openid-authentication-1_1.html#limits
uid = models.CharField(verbose_name=_('uid'), max_length=255)
last_login = models.DateTimeField(verbose_name=_('last login'),
auto_now=True)
date_joined = models.DateTimeField(verbose_name=_('date joined'),
auto_now_add=True)
extra_data = JSONField(verbose_name=_('extra data'), default='{}')
class Meta:
unique_together = ('provider', 'uid')
verbose_name = _('social account')
verbose_name_plural = _('social accounts')
def authenticate(self):
return authenticate(account=self)
def __str__(self):
return force_text(self.user)
def get_profile_url(self):
return self.get_provider_account().get_profile_url()
def get_avatar_url(self):
return self.get_provider_account().get_avatar_url()
def get_provider(self):
return providers.registry.by_id(self.provider)
def get_provider_account(self):
return self.get_provider().wrap_account(self)
@python_2_unicode_compatible
class SocialToken(models.Model):
app = models.ForeignKey(SocialApp)
account = models.ForeignKey(SocialAccount)
token = models \
.TextField(verbose_name=_('token'),
help_text=_('"oauth_token" (OAuth1) or access token'
' (OAuth2)'))
token_secret = models \
.TextField(blank=True,
verbose_name=_('token secret'),
help_text=_('"oauth_token_secret" (OAuth1) or refresh'
' token (OAuth2)'))
expires_at = models.DateTimeField(blank=True, null=True,
verbose_name=_('expires at'))
class Meta:
unique_together = ('app', 'account')
verbose_name = _('social application token')
verbose_name_plural = _('social application tokens')
def __str__(self):
return self.token
class SocialLogin(object):
"""
Represents a social user that is in the process of being logged
in. This consists of the following information:
`account` (`SocialAccount` instance): The social account being
logged in. Providers are not responsible for checking whether or
not an account already exists or not. Therefore, a provider
typically creates a new (unsaved) `SocialAccount` instance. The
`User` instance pointed to by the account (`account.user`) may be
prefilled by the provider for use as a starting point later on
during the signup process.
    `token` (`SocialToken` instance): An optional access token
that results from performing a successful authentication
handshake.
`state` (`dict`): The state to be preserved during the
authentication handshake. Note that this state may end up in the
url -- do not put any secrets in here. It currently only contains
the url to redirect to after login.
`email_addresses` (list of `EmailAddress`): Optional list of
e-mail addresses retrieved from the provider.
"""
    def __init__(self, user=None, account=None, token=None,
                 email_addresses=None):
        if token:
            assert token.account is None or token.account == account
        self.token = token
        self.user = user
        self.account = account
        # None (rather than a shared mutable []) is the default address list.
        self.email_addresses = email_addresses if email_addresses is not None else []
        self.state = {}
def connect(self, request, user):
self.user = user
self.save(request, connect=True)
def serialize(self):
ret = dict(account=serialize_instance(self.account),
user=serialize_instance(self.user),
state=self.state,
email_addresses=[serialize_instance(ea)
for ea in self.email_addresses])
if self.token:
ret['token'] = serialize_instance(self.token)
return ret
@classmethod
def deserialize(cls, data):
account = deserialize_instance(SocialAccount, data['account'])
user = deserialize_instance(get_user_model(), data['user'])
if 'token' in data:
token = deserialize_instance(SocialToken, data['token'])
else:
token = None
email_addresses = []
for ea in data['email_addresses']:
email_address = deserialize_instance(EmailAddress, ea)
email_addresses.append(email_address)
ret = SocialLogin()
ret.token = token
ret.account = account
ret.user = user
ret.email_addresses = email_addresses
ret.state = data['state']
return ret
def save(self, request, connect=False):
"""
Saves a new account. Note that while the account is new,
the user may be an existing one (when connecting accounts)
"""
assert not self.is_existing
user = self.user
user.save()
self.account.user = user
self.account.save()
if app_settings.STORE_TOKENS and self.token:
self.token.account = self.account
self.token.save()
if connect:
# TODO: Add any new email addresses automatically?
pass
else:
setup_user_email(request, user, self.email_addresses)
@property
def is_existing(self):
"""
Account is temporary, not yet backed by a database record.
"""
return self.account.pk
def lookup(self):
"""
Lookup existing account, if any.
"""
assert not self.is_existing
try:
a = SocialAccount.objects.get(provider=self.account.provider,
uid=self.account.uid)
# Update account
a.extra_data = self.account.extra_data
self.account = a
self.user = self.account.user
a.save()
# Update token
if app_settings.STORE_TOKENS and self.token:
assert not self.token.pk
try:
t = SocialToken.objects.get(account=self.account,
app=self.token.app)
t.token = self.token.token
if self.token.token_secret:
# only update the refresh token if we got one
# many oauth2 providers do not resend the refresh token
t.token_secret = self.token.token_secret
t.expires_at = self.token.expires_at
t.save()
self.token = t
except SocialToken.DoesNotExist:
self.token.account = a
self.token.save()
except SocialAccount.DoesNotExist:
pass
def get_redirect_url(self, request):
url = self.state.get('next')
return url
@classmethod
def state_from_request(cls, request):
state = {}
next_url = get_next_redirect_url(request)
if next_url:
state['next'] = next_url
state['process'] = get_request_param(request, 'process', 'login')
state['scope'] = get_request_param(request, 'scope', '')
state['auth_params'] = get_request_param(request, 'auth_params', '')
return state
@classmethod
def stash_state(cls, request):
state = cls.state_from_request(request)
verifier = get_random_string()
request.session['socialaccount_state'] = (state, verifier)
return verifier
@classmethod
def unstash_state(cls, request):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier = request.session.pop('socialaccount_state')
return state
@classmethod
def verify_and_unstash_state(cls, request, verifier):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier2 = request.session.pop('socialaccount_state')
if verifier != verifier2:
raise PermissionDenied()
return state
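# Editor's note (hypothetical sketch, not part of the original file):
# serialize()/deserialize() are intended to round-trip a pending login
# through the session, e.g.:
#
#   request.session['sociallogin'] = sociallogin.serialize()
#   ...
#   sociallogin = SocialLogin.deserialize(request.session['sociallogin'])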
| tejesh95/Zubio.in | zubio/allauth/socialaccount/models.py | Python | mit | 11,718 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test Qt creation from a copied empty environment.
"""
import TestSCons
test = TestSCons.TestSCons()
test.Qt_dummy_installation('qt')
test.write('SConstruct', """\
orig = Environment()
env = orig.Clone(QTDIR = r'%s',
QT_LIB = r'%s',
QT_MOC = r'%s',
QT_UIC = r'%s',
tools=['qt'])
env.Program('main', 'main.cpp', CPPDEFINES=['FOO'], LIBS=[])
""" % (test.QT, test.QT_LIB, test.QT_MOC, test.QT_UIC))
test.write('main.cpp', r"""
#include "foo6.h"
int main() { foo6(); return 0; }
""")
test.write(['qt', 'include', 'foo6.h'], """\
#include <stdio.h>
void
foo6(void)
{
#ifdef FOO
printf("qt/include/foo6.h\\n");
#endif
}
""")
# we can receive warnings about a non detected qt (empty QTDIR)
# these are not critical, but may be annoying.
test.run(stderr=None)
test.run(program = test.workpath('main' + TestSCons._exe),
stderr = None,
stdout = 'qt/include/foo6.h\n')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | test/QT/empty-env.py | Python | mit | 2,252 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Place/_HasNoLatOrLon.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasNoLatOrLon
#
#-------------------------------------------------------------------------
class HasNoLatOrLon(Rule):
"""Rule that checks if Latitude or Longitude are not given"""
labels = []
name = _('Places with no latitude or longitude given')
description = _("Matches places with empty latitude or longitude")
category = _('Position filters')
    def apply(self, db, place):
        if place.get_latitude().strip() and place.get_longitude().strip():
return False
return True
| sam-m888/gramps | gramps/gen/filters/rules/place/_hasnolatorlon.py | Python | gpl-2.0 | 1,902 |
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Copyright (C) 2008-2010 Olivier Tilloy <[email protected]>
#
# This file is part of the pyexiv2 distribution.
#
# pyexiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# pyexiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyexiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Author: Olivier Tilloy <[email protected]>
#
# ******************************************************************************
import unittest
from pyexiv2.utils import Rational
class TestRational(unittest.TestCase):
def test_constructor(self):
r = Rational(2, 1)
self.assertEqual(r.numerator, 2)
self.assertEqual(r.denominator, 1)
self.assertRaises(ZeroDivisionError, Rational, 1, 0)
def test_read_only(self):
r = Rational(3, 4)
try:
r.numerator = 5
except AttributeError:
pass
else:
self.fail('Numerator is not read-only.')
try:
r.denominator = 5
except AttributeError:
pass
else:
self.fail('Denominator is not read-only.')
def test_match_string(self):
self.assertEqual(Rational.match_string('4/3'), (4, 3))
self.assertEqual(Rational.match_string('-4/3'), (-4, 3))
self.assertEqual(Rational.match_string('0/3'), (0, 3))
self.assertEqual(Rational.match_string('0/0'), (0, 0))
self.assertRaises(ValueError, Rational.match_string, '+3/5')
self.assertRaises(ValueError, Rational.match_string, '3 / 5')
self.assertRaises(ValueError, Rational.match_string, '3/-5')
self.assertRaises(ValueError, Rational.match_string, 'invalid')
def test_from_string(self):
self.assertEqual(Rational.from_string('4/3'), Rational(4, 3))
self.assertEqual(Rational.from_string('-4/3'), Rational(-4, 3))
self.assertRaises(ValueError, Rational.from_string, '+3/5')
self.assertRaises(ValueError, Rational.from_string, '3 / 5')
self.assertRaises(ValueError, Rational.from_string, '3/-5')
self.assertRaises(ValueError, Rational.from_string, 'invalid')
self.assertRaises(ZeroDivisionError, Rational.from_string, '1/0')
self.assertRaises(ZeroDivisionError, Rational.from_string, '0/0')
def test_to_string(self):
self.assertEqual(str(Rational(3, 5)), '3/5')
self.assertEqual(str(Rational(-3, 5)), '-3/5')
def test_repr(self):
self.assertEqual(repr(Rational(3, 5)), 'Rational(3, 5)')
self.assertEqual(repr(Rational(-3, 5)), 'Rational(-3, 5)')
self.assertEqual(repr(Rational(0, 3)), 'Rational(0, 3)')
def test_to_float(self):
self.assertEqual(Rational(3, 6).to_float(), 0.5)
self.assertEqual(Rational(11, 11).to_float(), 1.0)
self.assertEqual(Rational(-2, 8).to_float(), -0.25)
self.assertEqual(Rational(0, 3).to_float(), 0.0)
def test_equality(self):
r1 = Rational(2, 1)
r2 = Rational(2, 1)
r3 = Rational(8, 4)
r4 = Rational(3, 2)
self.assertEqual(r1, r2)
self.assertEqual(r1, r3)
self.assertNotEqual(r1, r4)
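if __name__ == '__main__':
    # Conventional unittest entry point so the module can be run directly;
    # assumed rather than shown in this excerpt, but standard for this suite.
    unittest.main()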
| pridkett/pyexiv2 | test/rational.py | Python | gpl-2.0 | 3,750 |
#!/usr/bin/env python2
# coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| idl3r/Ropper | ropperapp/loaders/pe_intern/__init__.py | Python | gpl-2.0 | 729 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Find all the available input interfaces and try to initialize them.
"""
import os
import glob
import logging
from ..inputreaderinterface import InputReaderInterface
__author__ = 'Bitcraze AB'
__all__ = ['InputInterface']
logger = logging.getLogger(__name__)
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for f in
glob.glob(os.path.dirname(__file__) + "/[A-Za-z]*.py")]
if len(found_interfaces) == 0:
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for
f in glob.glob(os.path.dirname(__file__) +
"/[A-Za-z]*.pyc")]
logger.info("Found interfaces: {}".format(found_interfaces))
initialized_interfaces = []
available_interfaces = []
for interface in found_interfaces:
try:
module = __import__(interface, globals(), locals(), [interface], 1)
main_name = getattr(module, "MODULE_MAIN")
initialized_interfaces.append(getattr(module, main_name)())
logger.info("Successfully initialized [{}]".format(interface))
except Exception as e:
logger.info("Could not initialize [{}]: {}".format(interface, e))
def devices():
# Todo: Support rescanning and adding/removing devices
if len(available_interfaces) == 0:
for reader in initialized_interfaces:
devs = reader.devices()
for dev in devs:
available_interfaces.append(InputInterface(
dev["name"], dev["id"], reader))
return available_interfaces
class InputInterface(InputReaderInterface):
def __init__(self, dev_name, dev_id, dev_reader):
super(InputInterface, self).__init__(dev_name, dev_id, dev_reader)
# These devices cannot be mapped and configured
self.supports_mapping = False
# Ask the reader if it wants to limit
# roll/pitch/yaw/thrust for all devices
self.limit_rp = dev_reader.limit_rp
self.limit_thrust = dev_reader.limit_thrust
self.limit_yaw = dev_reader.limit_yaw
def open(self):
self._reader.open(self.id)
def close(self):
self._reader.close(self.id)
def read(self, include_raw=False):
mydata = self._reader.read(self.id)
# Merge interface returned data into InputReader Data Item
for key in list(mydata.keys()):
self.data.set(key, mydata[key])
return self.data
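# Usage sketch (illustrative only; device names and ids depend on the host):
#
#     for interface in devices():
#         interface.open()
#         state = interface.read()
#         interface.close()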
| manojngb/Crazyfly_simple_lift | src/cfclient/utils/input/inputinterfaces/__init__.py | Python | gpl-2.0 | 3,515 |
# Partname: ATmega644A
# generated automatically, do not edit
MCUREGS = {
'ADCSRB': '&123',
'ADCSRB_ACME': '$40',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$C0',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'OCR0B': '&72',
'OCR0A': '&71',
'TCNT0': '&70',
'TCCR0B': '&69',
'TCCR0B_FOC0A': '$80',
'TCCR0B_FOC0B': '$40',
'TCCR0B_WGM02': '$08',
'TCCR0B_CS0': '$07',
'TCCR0A': '&68',
'TCCR0A_COM0A': '$C0',
'TCCR0A_COM0B': '$30',
'TCCR0A_WGM0': '$03',
'TIMSK0': '&110',
'TIMSK0_OCIE0B': '$04',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0B': '$04',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSRSYNC': '$01',
'TIMSK2': '&112',
'TIMSK2_OCIE2B': '$04',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2B': '$04',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'TCCR2A': '&176',
'TCCR2A_COM2A': '$C0',
'TCCR2A_COM2B': '$30',
'TCCR2A_WGM2': '$03',
'TCCR2B': '&177',
'TCCR2B_FOC2A': '$80',
'TCCR2B_FOC2B': '$40',
'TCCR2B_WGM22': '$08',
'TCCR2B_CS2': '$07',
'TCNT2': '&178',
'OCR2B': '&180',
'OCR2A': '&179',
'ASSR': '&182',
'ASSR_EXCLK': '$40',
'ASSR_AS2': '$20',
'ASSR_TCN2UB': '$10',
'ASSR_OCR2AUB': '$08',
'ASSR_OCR2BUB': '$04',
'ASSR_TCR2AUB': '$02',
'ASSR_TCR2BUB': '$01',
'WDTCSR': '&96',
'WDTCSR_WDIF': '$80',
'WDTCSR_WDIE': '$40',
'WDTCSR_WDP': '$27',
'WDTCSR_WDCE': '$10',
'WDTCSR_WDE': '$08',
'OCDR': '&81',
'MCUCR': '&85',
'MCUCR_JTD': '$80',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_SIGRD': '$20',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'EICRA': '&105',
'EICRA_ISC2': '$30',
'EICRA_ISC1': '$0C',
'EICRA_ISC0': '$03',
'EIMSK': '&61',
'EIMSK_INT': '$07',
'EIFR': '&60',
'EIFR_INTF': '$07',
'PCMSK3': '&115',
'PCMSK3_PCINT': '$FF',
'PCMSK2': '&109',
'PCMSK2_PCINT': '$FF',
'PCMSK1': '&108',
'PCMSK1_PCINT': '$FF',
'PCMSK0': '&107',
'PCMSK0_PCINT': '$FF',
'PCIFR': '&59',
'PCIFR_PCIF': '$0F',
'PCICR': '&104',
'PCICR_PCIE': '$0F',
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADC': '&120',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'ICR1': '&134',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EEPM': '$30',
'EECR_EERIE': '$08',
'EECR_EEMPE': '$04',
'EECR_EEPE': '$02',
'EECR_EERE': '$01',
'TWAMR': '&189',
'TWAMR_TWAM': '$FE',
'TWBR': '&184',
'TWCR': '&188',
'TWCR_TWINT': '$80',
'TWCR_TWEA': '$40',
'TWCR_TWSTA': '$20',
'TWCR_TWSTO': '$10',
'TWCR_TWWC': '$08',
'TWCR_TWEN': '$04',
'TWCR_TWIE': '$01',
'TWSR': '&185',
'TWSR_TWS': '$F8',
'TWSR_TWPS': '$03',
'TWDR': '&187',
'TWAR': '&186',
'TWAR_TWA': '$FE',
'TWAR_TWGCE': '$01',
'UDR1': '&206',
'UCSR1A': '&200',
'UCSR1A_RXC1': '$80',
'UCSR1A_TXC1': '$40',
'UCSR1A_UDRE1': '$20',
'UCSR1A_FE1': '$10',
'UCSR1A_DOR1': '$08',
'UCSR1A_UPE1': '$04',
'UCSR1A_U2X1': '$02',
'UCSR1A_MPCM1': '$01',
'UCSR1B': '&201',
'UCSR1B_RXCIE1': '$80',
'UCSR1B_TXCIE1': '$40',
'UCSR1B_UDRIE1': '$20',
'UCSR1B_RXEN1': '$10',
'UCSR1B_TXEN1': '$08',
'UCSR1B_UCSZ12': '$04',
'UCSR1B_RXB81': '$02',
'UCSR1B_TXB81': '$01',
'UCSR1C': '&202',
'UCSR1C_UMSEL1': '$C0',
'UCSR1C_UPM1': '$30',
'UCSR1C_USBS1': '$08',
'UCSR1C_UCSZ1': '$06',
'UCSR1C_UCPOL1': '$01',
'UBRR1': '&204',
'SPDR': '&78',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'GPIOR2': '&75',
'GPIOR2_GPIOR': '$FF',
'GPIOR1': '&74',
'GPIOR1_GPIOR': '$FF',
'GPIOR0': '&62',
'GPIOR0_GPIOR07': '$80',
'GPIOR0_GPIOR06': '$40',
'GPIOR0_GPIOR05': '$20',
'GPIOR0_GPIOR04': '$10',
'GPIOR0_GPIOR03': '$08',
'GPIOR0_GPIOR02': '$04',
'GPIOR0_GPIOR01': '$02',
'GPIOR0_GPIOR00': '$01',
'PRR0': '&100',
'PRR0_PRTWI': '$80',
'PRR0_PRTIM2': '$40',
'PRR0_PRTIM0': '$20',
'PRR0_PRUSART': '$12',
'PRR0_PRTIM1': '$08',
'PRR0_PRSPI': '$04',
'PRR0_PRADC': '$01',
'INT0Addr': '2',
'INT1Addr': '4',
'INT2Addr': '6',
'PCINT0Addr': '8',
'PCINT1Addr': '10',
'PCINT2Addr': '12',
'PCINT3Addr': '14',
'WDTAddr': '16',
'TIMER2_COMPAAddr': '18',
'TIMER2_COMPBAddr': '20',
'TIMER2_OVFAddr': '22',
'TIMER1_CAPTAddr': '24',
'TIMER1_COMPAAddr': '26',
'TIMER1_COMPBAddr': '28',
'TIMER1_OVFAddr': '30',
'TIMER0_COMPAAddr': '32',
'TIMER0_COMPBAddr': '34',
'TIMER0_OVFAddr': '36',
'SPI__STCAddr': '38',
'USART0__RXAddr': '40',
'USART0__UDREAddr': '42',
'USART0__TXAddr': '44',
'ANALOG_COMPAddr': '46',
'ADCAddr': '48',
'EE_READYAddr': '50',
'TWIAddr': '52',
'SPM_READYAddr': '54',
'USART1_RXAddr': '56',
'USART1_UDREAddr': '58',
'USART1_TXAddr': '60'
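    # Value conventions (inferred from the generated output): '&NNN' is a
    # decimal register address, '$HH' is a hex bit mask, and bare numbers are
    # interrupt vector addresses -- e.g. int(MCUREGS['PORTB'][1:]) == 37 and
    # int(MCUREGS['SREG_I'][1:], 16) == 0x80.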
}
| hickey/amforth | core/devices/atmega644a/device.py | Python | gpl-2.0 | 7375 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, QObject, QSettings, QByteArray, SIGNAL, pyqtSignal
from PyQt4.QtGui import QDialog, QWidget, QAction, QKeySequence, \
QDialogButtonBox, QApplication, QCursor, QMessageBox, QClipboard, QInputDialog, QIcon
from PyQt4.Qsci import QsciAPIs
from qgis.core import QgsProject
from .db_plugins.plugin import BaseError
from .dlg_db_error import DlgDbError
from .dlg_query_builder import QueryBuilderDlg
try:
from qgis.gui import QgsCodeEditorSQL
except:
from .sqledit import SqlEdit
from qgis import gui
gui.QgsCodeEditorSQL = SqlEdit
from .ui.ui_DlgSqlWindow import Ui_DbManagerDlgSqlWindow as Ui_Dialog
import re
class DlgSqlWindow(QWidget, Ui_Dialog):
nameChanged = pyqtSignal(str)
def __init__(self, iface, db, parent=None):
QWidget.__init__(self, parent)
self.iface = iface
self.db = db
self.setupUi(self)
self.setWindowTitle(
u"%s - %s [%s]" % (self.windowTitle(), db.connection().connectionName(), db.connection().typeNameString()))
self.defaultLayerName = 'QueryLayer'
self.editSql.setFocus()
self.editSql.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.initCompleter()
# allow to copy results
copyAction = QAction("copy", self)
self.viewResult.addAction(copyAction)
copyAction.setShortcuts(QKeySequence.Copy)
copyAction.triggered.connect(self.copySelectedResults)
self.btnExecute.clicked.connect(self.executeSql)
self.btnClear.clicked.connect(self.clearSql)
self.presetStore.clicked.connect(self.storePreset)
self.presetDelete.clicked.connect(self.deletePreset)
self.presetCombo.activated[str].connect(self.loadPreset)
self.presetCombo.activated[str].connect(self.presetName.setText)
self.updatePresetsCombobox()
# hide the load query as layer if feature is not supported
self._loadAsLayerAvailable = self.db.connector.hasCustomQuerySupport()
self.loadAsLayerGroup.setVisible(self._loadAsLayerAvailable)
if self._loadAsLayerAvailable:
self.layerTypeWidget.hide() # show if load as raster is supported
self.loadLayerBtn.clicked.connect(self.loadSqlLayer)
self.getColumnsBtn.clicked.connect(self.fillColumnCombos)
self.loadAsLayerGroup.toggled.connect(self.loadAsLayerToggled)
self.loadAsLayerToggled(False)
self._createViewAvailable = self.db.connector.hasCreateSpatialViewSupport()
self.btnCreateView.setVisible(self._createViewAvailable)
if self._createViewAvailable:
self.btnCreateView.clicked.connect(self.createView)
self.queryBuilderFirst = True
self.queryBuilderBtn.setIcon(QIcon(":/db_manager/icons/sql.gif"))
self.queryBuilderBtn.clicked.connect(self.displayQueryBuilder)
self.presetName.textChanged.connect(self.nameChanged)
def updatePresetsCombobox(self):
self.presetCombo.clear()
names = []
entries = QgsProject.instance().subkeyList('DBManager', 'savedQueries')
for entry in entries:
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/' + entry + '/name')[0]
names.append(name)
for name in sorted(names):
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(-1)
def storePreset(self):
query = self._getSqlQuery()
if query == "":
return
name = self.presetName.text()
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name', name)
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query', query)
index = self.presetCombo.findText(name)
if index == -1:
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(self.presetCombo.count() - 1)
else:
self.presetCombo.setCurrentIndex(index)
def deletePreset(self):
name = self.presetCombo.currentText()
QgsProject.instance().removeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()))
self.presetCombo.removeItem(self.presetCombo.findText(name))
self.presetCombo.setCurrentIndex(-1)
def loadPreset(self, name):
query = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query')[0]
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name')[0]
self.editSql.setText(query)
def loadAsLayerToggled(self, checked):
self.loadAsLayerGroup.setChecked(checked)
self.loadAsLayerWidget.setVisible(checked)
def clearSql(self):
self.editSql.clear()
self.editSql.setFocus()
def executeSql(self):
sql = self._getSqlQuery()
if sql == "":
return
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# delete the old model
old_model = self.viewResult.model()
self.viewResult.setModel(None)
if old_model:
old_model.deleteLater()
self.uniqueCombo.clear()
self.geomCombo.clear()
try:
# set the new model
model = self.db.sqlResultModel(sql, self)
self.viewResult.setModel(model)
self.lblResult.setText(self.tr("%d rows, %.1f seconds") % (model.affectedRows(), model.secs()))
except BaseError as e:
QApplication.restoreOverrideCursor()
DlgDbError.showError(e, self)
return
cols = sorted(self.viewResult.model().columnNames())
self.uniqueCombo.addItems(cols)
self.geomCombo.addItems(cols)
self.update()
QApplication.restoreOverrideCursor()
def loadSqlLayer(self):
hasUniqueField = self.uniqueColumnCheck.checkState() == Qt.Checked
if hasUniqueField:
uniqueFieldName = self.uniqueCombo.currentText()
else:
uniqueFieldName = None
hasGeomCol = self.hasGeometryCol.checkState() == Qt.Checked
if hasGeomCol:
geomFieldName = self.geomCombo.currentText()
else:
geomFieldName = None
query = self._getSqlQuery()
if query == "":
return
# remove a trailing ';' from query if present
if query.strip().endswith(';'):
query = query.strip()[:-1]
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
from qgis.core import QgsMapLayer, QgsMapLayerRegistry
layerType = QgsMapLayer.VectorLayer if self.vectorRadio.isChecked() else QgsMapLayer.RasterLayer
# get a new layer name
names = []
for layer in QgsMapLayerRegistry.instance().mapLayers().values():
names.append(layer.name())
layerName = self.layerNameEdit.text()
if layerName == "":
layerName = self.defaultLayerName
newLayerName = layerName
index = 1
while newLayerName in names:
index += 1
newLayerName = u"%s_%d" % (layerName, index)
# create the layer
layer = self.db.toSqlLayer(query, geomFieldName, uniqueFieldName, newLayerName, layerType,
self.avoidSelectById.isChecked())
if layer.isValid():
QgsMapLayerRegistry.instance().addMapLayers([layer], True)
QApplication.restoreOverrideCursor()
def fillColumnCombos(self):
query = self._getSqlQuery()
if query == "":
return
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.uniqueCombo.clear()
self.geomCombo.clear()
# get a new alias
aliasIndex = 0
while True:
alias = "_%s__%d" % ("subQuery", aliasIndex)
escaped = re.compile('\\b("?)' + re.escape(alias) + '\\1\\b')
if not escaped.search(query):
break
aliasIndex += 1
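        # e.g. if the query already mentions _subQuery__0, the loop above
        # settles on _subQuery__1 as the first collision-free alias.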
# remove a trailing ';' from query if present
if query.strip().endswith(';'):
query = query.strip()[:-1]
# get all the columns
cols = []
connector = self.db.connector
sql = u"SELECT * FROM (%s\n) AS %s LIMIT 0" % (unicode(query), connector.quoteId(alias))
c = None
try:
c = connector._execute(None, sql)
cols = connector._get_cursor_columns(c)
except BaseError as e:
QApplication.restoreOverrideCursor()
DlgDbError.showError(e, self)
return
finally:
if c:
c.close()
del c
# get sensible default columns. do this before sorting in case there's hints in the column order (eg, id is more likely to be first)
try:
defaultGeomCol = next(col for col in cols if col in ['geom', 'geometry', 'the_geom', 'way'])
except:
defaultGeomCol = None
try:
defaultUniqueCol = [col for col in cols if 'id' in col][0]
except:
defaultUniqueCol = None
cols.sort()
self.uniqueCombo.addItems(cols)
self.geomCombo.addItems(cols)
# set sensible default columns
try:
self.geomCombo.setCurrentIndex(cols.index(defaultGeomCol))
except:
pass
try:
self.uniqueCombo.setCurrentIndex(cols.index(defaultUniqueCol))
except:
pass
QApplication.restoreOverrideCursor()
def copySelectedResults(self):
if len(self.viewResult.selectedIndexes()) <= 0:
return
model = self.viewResult.model()
# convert to string using tab as separator
text = model.headerToString("\t")
for idx in self.viewResult.selectionModel().selectedRows():
text += "\n" + model.rowToString(idx.row(), "\t")
QApplication.clipboard().setText(text, QClipboard.Selection)
QApplication.clipboard().setText(text, QClipboard.Clipboard)
def initCompleter(self):
dictionary = None
if self.db:
dictionary = self.db.connector.getSqlDictionary()
if not dictionary:
# use the generic sql dictionary
from .sql_dictionary import getSqlDictionary
dictionary = getSqlDictionary()
wordlist = []
for name, value in dictionary.iteritems():
wordlist += value # concat lists
wordlist = list(set(wordlist)) # remove duplicates
api = QsciAPIs(self.editSql.lexer())
for word in wordlist:
api.add(word)
api.prepare()
self.editSql.lexer().setAPIs(api)
def displayQueryBuilder(self):
dlg = QueryBuilderDlg(self.iface, self.db, self, reset=self.queryBuilderFirst)
self.queryBuilderFirst = False
r = dlg.exec_()
if r == QDialog.Accepted:
self.editSql.setText(dlg.query)
def createView(self):
name, ok = QInputDialog.getText(None, "View name", "View name")
if ok:
try:
self.db.connector.createSpatialView(name, self._getSqlQuery())
except BaseError as e:
DlgDbError.showError(e, self)
def _getSqlQuery(self):
sql = self.editSql.selectedText()
if len(sql) == 0:
sql = self.editSql.text()
return sql
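# Usage sketch (DB Manager normally constructs this widget itself; `iface`
# and a connected `db` are assumed here):
#
#     dlg = DlgSqlWindow(iface, db)
#     dlg.editSql.setText("SELECT 1")
#     dlg.executeSql()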
| jarped/QGIS | python/plugins/db_manager/dlg_sql_window.py | Python | gpl-2.0 | 12,633 |
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework import serializers
from nodeshot.core.base.serializers import ModelValidationSerializer
from nodeshot.community.profiles.serializers import ProfileRelationSerializer
from .models import Comment, Vote, Rating, NodeRatingCount
__all__ = ['CommentSerializer',
'RatingSerializer',
'CommentRelationSerializer',
'VoteSerializer',
'ParticipationSerializer']
class AutoNodeMixin(object):
"""
    Automatically adds the node to validated_data.
    The node info is taken from views that extend NodeRelationViewMixin.
"""
def validate(self, data):
data['node'] = self.context['view'].node
return super(AutoNodeMixin, self).validate(data)
class CommentSerializer(AutoNodeMixin, ModelValidationSerializer):
""" Comment serializer """
node = serializers.ReadOnlyField(source='node.name')
username = serializers.ReadOnlyField(source='user.username')
class Meta:
model = Comment
fields = ('node', 'username', 'text', 'added')
read_only_fields = ('added',)
class CommentRelationSerializer(serializers.ModelSerializer):
""" display user info """
user = ProfileRelationSerializer()
class Meta:
model = Comment
fields = ('user', 'text', 'added',)
class RatingSerializer(AutoNodeMixin, ModelValidationSerializer):
""" Rating serializer """
node = serializers.ReadOnlyField(source='node.name')
username = serializers.ReadOnlyField(source='user.username')
class Meta:
model = Rating
fields = ('node', 'username', 'value',)
read_only_fields = ('added',)
class VoteSerializer(AutoNodeMixin, ModelValidationSerializer):
node = serializers.ReadOnlyField(source='node.name')
username = serializers.ReadOnlyField(source='user.username')
class Meta:
model = Vote
fields = ('node', 'username', 'vote',)
read_only_fields = ('added',)
class ParticipationSerializer(serializers.ModelSerializer):
class Meta:
model = NodeRatingCount
fields = ('likes', 'dislikes', 'rating_count',
'rating_avg', 'comment_count')
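# Usage sketch (hypothetical wiring; assumes a DRF view that sets
# `self.node`, as NodeRelationViewMixin does):
#
#     serializer = CommentSerializer(data={'text': 'hello'},
#                                    context={'view': view})
#     serializer.is_valid(raise_exception=True)
#     serializer.save(user=request.user)  # node injected by AutoNodeMixin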
| SCORE42/nodeshot | nodeshot/community/participation/serializers.py | Python | gpl-3.0 | 2,237 |
# Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import optparse_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_text
from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.play import Play
from ansible.plugins.loader import get_all_plugin_loaders
from ansible.utils.display import Display
display = Display()
class AdHocCLI(CLI):
''' is an extra-simple tool/framework/API for doing 'remote things'.
        This command allows you to define and run a single task 'playbook' against a set of hosts
'''
def init_parser(self):
''' create an options parser for bin/ansible '''
super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
desc="Define and run a single task 'playbook' against"
" a set of hosts",
epilog="Some modules do not make sense in Ad-Hoc (include,"
" meta, etc)")
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_async_options(self.parser)
opt_help.add_output_options(self.parser)
opt_help.add_connect_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
# options unique to ansible ad-hoc
self.parser.add_option('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
self.parser.add_option('-m', '--module-name', dest='module_name',
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
def post_process_args(self, options, args):
'''Post process and validate options for bin/ansible '''
options, args = super(AdHocCLI, self).post_process_args(options, args)
if len(args) < 1:
raise AnsibleOptionsError("Missing target hosts")
elif len(args) > 1:
raise AnsibleOptionsError("Extraneous options or arguments")
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, vault_opts=True, fork_opts=True)
options = self.normalize_become_options(options)
return options, args
def _play_ds(self, pattern, async_val, poll):
check_raw = context.CLIARGS['module_name'] in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw')
mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)}}
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in ('include_role', 'include_tasks') or any(frozenset((async_val, poll))):
mytask['async_val'] = async_val
mytask['poll'] = poll
return dict(
name="Ansible Ad-Hoc",
hosts=pattern,
gather_facts='no',
tasks=[mytask])
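    # e.g. _play_ds('all', 0, 15) for `-m ping` yields, roughly:
    #   {'name': 'Ansible Ad-Hoc', 'hosts': 'all', 'gather_facts': 'no',
    #    'tasks': [{'action': {'module': 'ping', 'args': {}},
    #               'async_val': 0, 'poll': 15}]}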
def run(self):
''' create and execute the single task playbook '''
super(AdHocCLI, self).run()
# only thing left should be host pattern
pattern = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')
sshpass = None
becomepass = None
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# dynamically load any plugins
get_all_plugin_loaders()
loader, inventory, variable_manager = self._play_prereqs()
try:
hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
except AnsibleError:
if context.CLIARGS['subset']:
raise
else:
hosts = []
display.warning("No hosts matched, nothing to do")
if context.CLIARGS['listhosts']:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
err = "No argument passed to %s module" % context.CLIARGS['module_name']
if pattern.endswith(".yml"):
err = err + ' (did you mean to run ansible-playbook?)'
raise AnsibleOptionsError(err)
# Avoid modules that don't work with ad-hoc
if context.CLIARGS['module_name'] in ('import_playbook',):
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
% context.CLIARGS['module_name'])
play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
# used in start callback
playbook = Playbook(loader)
playbook._entries.append(play)
playbook._file_name = '__adhoc_playbook__'
if self.callback:
cb = self.callback
elif context.CLIARGS['one_line']:
cb = 'oneline'
# Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
cb = C.DEFAULT_STDOUT_CALLBACK
else:
cb = 'minimal'
run_tree = False
if context.CLIARGS['tree']:
C.DEFAULT_CALLBACK_WHITELIST.append('tree')
C.TREE_DIR = context.CLIARGS['tree']
run_tree = True
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
forks=context.CLIARGS['forks'],
)
self._tqm.send_callback('v2_playbook_on_start', playbook)
result = self._tqm.run(play)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
finally:
if self._tqm:
self._tqm.cleanup()
if loader:
loader.cleanup_all_tmp_files()
return result
| gregdek/ansible | lib/ansible/cli/adhoc.py | Python | gpl-3.0 | 7,375 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_access_control
import test_users
import test_groups
| ovnicraft/odoo_addons | smile_access_control/tests/__init__.py | Python | agpl-3.0 | 1,040 |
"""
Common utility functions useful throughout the contentstore
"""
import logging
from datetime import datetime
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import CourseKey, UsageKey
from pytz import UTC
from six import text_type
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from student import auth
from student.models import CourseEnrollment
from student.roles import CourseInstructorRole, CourseStaffRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.partitions.partitions_service import get_all_partitions_for_course
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
"""
Adds given user as instructor and staff to the given course,
after verifying that the requesting_user has permission to do so.
"""
# can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
CourseInstructorRole(course_key).add_users(new_instructor)
auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
def initialize_permissions(course_key, user_who_created_course):
"""
Initializes a new course by enrolling the course creator as a student,
and initializing Forum by seeding its permissions and assigning default roles.
"""
# seed the forums
seed_permissions_roles(course_key)
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user_who_created_course, course_key)
# set default forum roles (assign 'Student' role)
assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
"""
Removes all instructor and staff users from the given course.
"""
staff_role = CourseStaffRole(course_key)
staff_role.remove_users(*staff_role.users_with_role())
instructor_role = CourseInstructorRole(course_key)
instructor_role.remove_users(*instructor_role.users_with_role())
def delete_course(course_key, user_id, keep_instructors=False):
"""
Delete course from module store and if specified remove user and
groups permissions from course.
"""
_delete_course_from_modulestore(course_key, user_id)
if not keep_instructors:
_remove_instructors(course_key)
def _delete_course_from_modulestore(course_key, user_id):
"""
Delete course from MongoDB. Deleting course will fire a signal which will result into
deletion of the courseware associated with a course_key.
"""
module_store = modulestore()
with module_store.bulk_operations(course_key):
module_store.delete_course(course_key, user_id)
def _remove_instructors(course_key):
"""
In the django layer, remove all the user/groups permissions associated with this course
"""
print 'removing User permissions from course....'
try:
remove_all_instructors(course_key)
except Exception as err:
log.error("Error in deleting course groups for {0}: {1}".format(course_key, err))
def get_lms_link_for_item(location, preview=False):
"""
Returns an LMS link to the course with a jump_to to the provided location.
:param location: the location to jump to
:param preview: True if the preview version of LMS should be returned. Default value is false.
"""
assert isinstance(location, UsageKey)
# checks LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"LMS_BASE",
settings.LMS_BASE
)
if lms_base is None:
return None
if preview:
# checks PREVIEW_LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.FEATURES.get('PREVIEW_LMS_BASE')
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"PREVIEW_LMS_BASE",
settings.FEATURES.get('PREVIEW_LMS_BASE')
)
return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
lms_base=lms_base,
course_key=text_type(location.course_key),
location=text_type(location),
)
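# Example of the produced link shape (host and keys illustrative):
#   //lms.example.com/courses/course-v1:edX+Demo+2019/jump_to/block-v1:...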
# pylint: disable=invalid-name
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
"""
Returns the url to the certificate web view.
"""
assert isinstance(course_key, CourseKey)
# checks LMS_BASE value in SiteConfiguration against course_org_filter if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(course_key.org, "LMS_BASE", settings.LMS_BASE)
if lms_base is None:
return None
return u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}".format(
certificate_web_base=lms_base,
user_id=user_id,
course_id=unicode(course_key),
mode=mode
)
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
"""
Returns true if there is a published version of the xblock that is currently visible to students.
This means that it has a release date in the past, and the xblock has not been set to staff only.
"""
try:
published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
# If there's no published version then the xblock is clearly not visible
except ItemNotFoundError:
return False
# If visible_to_staff_only is True, this xblock is not visible to students regardless of start date.
if published.visible_to_staff_only:
return False
# Check start date
if 'detached' not in published._class_tags and published.start is not None:
return datetime.now(UTC) > published.start
# No start date, so it's always visible
return True
def has_children_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has children that are limited to specific user partition groups.
Note that this method is not recursive (it does not check grandchildren).
"""
if not xblock.has_children:
return False
for child in xblock.get_children():
if is_visible_to_specific_partition_groups(child):
return True
return False
def is_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has visibility limited to specific user partition groups.
"""
if not xblock.group_access:
return False
for partition in get_user_partition_info(xblock):
if any(g["selected"] for g in partition["groups"]):
return True
return False
def find_release_date_source(xblock):
"""
Finds the ancestor of xblock that set its release date.
"""
# Stop searching at the section level
if xblock.category == 'chapter':
return xblock
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own release date
if not parent_location:
return xblock
parent = modulestore().get_item(parent_location)
if parent.start != xblock.start:
return xblock
else:
return find_release_date_source(parent)
def find_staff_lock_source(xblock):
"""
Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked.
If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock.
"""
# Stop searching if this xblock has explicitly set its own staff lock
if xblock.fields['visible_to_staff_only'].is_set_on(xblock):
return xblock
# Stop searching at the section level
if xblock.category == 'chapter':
return None
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own staff lock
if not parent_location:
return None
parent = modulestore().get_item(parent_location)
return find_staff_lock_source(parent)
def ancestor_has_staff_lock(xblock, parent_xblock=None):
"""
Returns True iff one of xblock's ancestors has staff lock.
Can avoid mongo query by passing in parent_xblock.
"""
if parent_xblock is None:
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
if not parent_location:
return False
parent_xblock = modulestore().get_item(parent_location)
return parent_xblock.visible_to_staff_only
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
"""
Creates the URL for the given handler.
The optional key_name and key_value are passed in as kwargs to the handler.
"""
    kwargs_for_reverse = {key_name: unicode(key_value)} if key_name else {}
if kwargs:
kwargs_for_reverse.update(kwargs)
return reverse(handler_name, kwargs=kwargs_for_reverse)
def reverse_course_url(handler_name, course_key, kwargs=None):
"""
Creates the URL for handlers that use course_keys as URL parameters.
"""
return reverse_url(handler_name, 'course_key_string', course_key, kwargs)
def reverse_library_url(handler_name, library_key, kwargs=None):
"""
Creates the URL for handlers that use library_keys as URL parameters.
"""
return reverse_url(handler_name, 'library_key_string', library_key, kwargs)
def reverse_usage_url(handler_name, usage_key, kwargs=None):
"""
Creates the URL for handlers that use usage_keys as URL parameters.
"""
return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
def get_split_group_display_name(xblock, course):
"""
Returns group name if an xblock is found in user partition groups that are suitable for the split_test module.
Arguments:
xblock (XBlock): The courseware component.
course (XBlock): The course descriptor.
Returns:
group name (String): Group name of the matching group xblock.
"""
for user_partition in get_user_partition_info(xblock, schemes=['random'], course=course):
for group in user_partition['groups']:
if 'Group ID {group_id}'.format(group_id=group['id']) == xblock.display_name_with_default:
return group['name']
def get_user_partition_info(xblock, schemes=None, course=None):
"""
Retrieve user partition information for an XBlock for display in editors.
* If a partition has been disabled, it will be excluded from the results.
* If a group within a partition is referenced by the XBlock, but the group has been deleted,
the group will be marked as deleted in the results.
Arguments:
xblock (XBlock): The courseware component being edited.
Keyword Arguments:
schemes (iterable of str): If provided, filter partitions to include only
schemes with the provided names.
course (XBlock): The course descriptor. If provided, uses this to look up the user partitions
instead of loading the course. This is useful if we're calling this function multiple
times for the same course want to minimize queries to the modulestore.
Returns: list
Example Usage:
>>> get_user_partition_info(block, schemes=["cohort", "verification"])
[
{
"id": 12345,
"name": "Cohorts"
"scheme": "cohort",
"groups": [
{
"id": 7890,
"name": "Foo",
"selected": True,
"deleted": False,
}
]
},
{
"id": 7292,
"name": "Midterm A",
"scheme": "verification",
"groups": [
{
"id": 1,
"name": "Completed verification at Midterm A",
"selected": False,
"deleted": False
},
{
"id": 0,
"name": "Did not complete verification at Midterm A",
"selected": False,
"deleted": False,
}
]
}
]
"""
course = course or modulestore().get_course(xblock.location.course_key)
if course is None:
log.warning(
"Could not find course %s to retrieve user partition information",
xblock.location.course_key
)
return []
if schemes is not None:
schemes = set(schemes)
partitions = []
for p in sorted(get_all_partitions_for_course(course, active_only=True), key=lambda p: p.name):
# Exclude disabled partitions, partitions with no groups defined
# The exception to this case is when there is a selected group within that partition, which means there is
# a deleted group
# Also filter by scheme name if there's a filter defined.
selected_groups = set(xblock.group_access.get(p.id, []) or [])
if (p.groups or selected_groups) and (schemes is None or p.scheme.name in schemes):
# First, add groups defined by the partition
groups = []
for g in p.groups:
# Falsey group access for a partition mean that all groups
# are selected. In the UI, though, we don't show the particular
# groups selected, since there's a separate option for "all users".
groups.append({
"id": g.id,
"name": g.name,
"selected": g.id in selected_groups,
"deleted": False,
})
# Next, add any groups set on the XBlock that have been deleted
all_groups = set(g.id for g in p.groups)
missing_group_ids = selected_groups - all_groups
for gid in missing_group_ids:
groups.append({
"id": gid,
"name": _("Deleted Group"),
"selected": True,
"deleted": True,
})
# Put together the entire partition dictionary
partitions.append({
"id": p.id,
"name": unicode(p.name), # Convert into a string in case ugettext_lazy was used
"scheme": p.scheme.name,
"groups": groups,
})
return partitions
def get_visibility_partition_info(xblock, course=None):
"""
Retrieve user partition information for the component visibility editor.
This pre-processes partition information to simplify the template.
Arguments:
xblock (XBlock): The component being edited.
course (XBlock): The course descriptor. If provided, uses this to look up the user partitions
instead of loading the course. This is useful if we're calling this function multiple
times for the same course want to minimize queries to the modulestore.
Returns: dict
"""
selectable_partitions = []
# We wish to display enrollment partitions before cohort partitions.
enrollment_user_partitions = get_user_partition_info(xblock, schemes=["enrollment_track"], course=course)
# For enrollment partitions, we only show them if there is a selected group or
# or if the number of groups > 1.
for partition in enrollment_user_partitions:
if len(partition["groups"]) > 1 or any(group["selected"] for group in partition["groups"]):
selectable_partitions.append(partition)
# Now add the cohort user partitions.
selectable_partitions = selectable_partitions + get_user_partition_info(xblock, schemes=["cohort"], course=course)
# Find the first partition with a selected group. That will be the one initially enabled in the dialog
# (if the course has only been added in Studio, only one partition should have a selected group).
selected_partition_index = -1
# At the same time, build up all the selected groups as they are displayed in the dialog title.
selected_groups_label = ''
for index, partition in enumerate(selectable_partitions):
for group in partition["groups"]:
if group["selected"]:
if len(selected_groups_label) == 0:
selected_groups_label = group['name']
else:
# Translators: This is building up a list of groups. It is marked for translation because of the
# comma, which is used as a separator between each group.
selected_groups_label = _('{previous_groups}, {current_group}').format(
previous_groups=selected_groups_label,
current_group=group['name']
)
if selected_partition_index == -1:
selected_partition_index = index
return {
"selectable_partitions": selectable_partitions,
"selected_partition_index": selected_partition_index,
"selected_groups_label": selected_groups_label,
}
def get_xblock_aside_instance(usage_key):
"""
Returns: aside instance of a aside xblock
:param usage_key: Usage key of aside xblock
"""
try:
descriptor = modulestore().get_item(usage_key.usage_key)
for aside in descriptor.runtime.get_asides(descriptor):
if aside.scope_ids.block_type == usage_key.aside_type:
return aside
except ItemNotFoundError:
log.warning(u'Unable to load item %s', usage_key.usage_key)
def is_self_paced(course):
"""
Returns True if course is self-paced, False otherwise.
"""
return course and course.self_paced
| Stanford-Online/edx-platform | cms/djangoapps/contentstore/utils.py | Python | agpl-3.0 | 18,481 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyAsn1crypto(PythonPackage):
"""Python ASN.1 library with a focus on performance and a pythonic API """
homepage = "https://github.com/wbond/asn1crypto"
url = "https://pypi.io/packages/source/a/asn1crypto/asn1crypto-0.22.0.tar.gz"
version('0.22.0', '74a8b9402625b38ef19cf3fa69ef8470')
depends_on('py-setuptools', type='build')
| EmreAtes/spack | var/spack/repos/builtin/packages/py-asn1crypto/package.py | Python | lgpl-2.1 | 1,619 |
"""Debounce helper."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Callable
from logging import Logger
from typing import Any
from homeassistant.core import HassJob, HomeAssistant, callback
class Debouncer:
"""Class to rate limit calls to a specific command."""
def __init__(
self,
hass: HomeAssistant,
logger: Logger,
*,
cooldown: float,
immediate: bool,
function: Callable[..., Awaitable[Any]] | None = None,
) -> None:
"""Initialize debounce.
immediate: indicate if the function needs to be called right away and
wait <cooldown> until executing next invocation.
function: optional and can be instantiated later.
"""
self.hass = hass
self.logger = logger
self._function = function
self.cooldown = cooldown
self.immediate = immediate
self._timer_task: asyncio.TimerHandle | None = None
self._execute_at_end_of_timer: bool = False
self._execute_lock = asyncio.Lock()
self._job: HassJob | None = None if function is None else HassJob(function)
@property
def function(self) -> Callable[..., Awaitable[Any]] | None:
"""Return the function being wrapped by the Debouncer."""
return self._function
@function.setter
def function(self, function: Callable[..., Awaitable[Any]]) -> None:
"""Update the function being wrapped by the Debouncer."""
self._function = function
if self._job is None or function != self._job.target:
self._job = HassJob(function)
async def async_call(self) -> None:
"""Call the function."""
assert self._job is not None
if self._timer_task:
if not self._execute_at_end_of_timer:
self._execute_at_end_of_timer = True
return
# Locked means a call is in progress. Any call is good, so abort.
if self._execute_lock.locked():
return
if not self.immediate:
self._execute_at_end_of_timer = True
self._schedule_timer()
return
async with self._execute_lock:
# Abort if timer got set while we're waiting for the lock.
if self._timer_task:
return
task = self.hass.async_run_hass_job(self._job)
if task:
await task
self._schedule_timer()
async def _handle_timer_finish(self) -> None:
"""Handle a finished timer."""
assert self._job is not None
self._timer_task = None
if not self._execute_at_end_of_timer:
return
self._execute_at_end_of_timer = False
# Locked means a call is in progress. Any call is good, so abort.
if self._execute_lock.locked():
return
async with self._execute_lock:
# Abort if timer got set while we're waiting for the lock.
if self._timer_task:
return # type: ignore
try:
task = self.hass.async_run_hass_job(self._job)
if task:
await task
except Exception: # pylint: disable=broad-except
self.logger.exception("Unexpected exception from %s", self.function)
self._schedule_timer()
@callback
def async_cancel(self) -> None:
"""Cancel any scheduled call."""
if self._timer_task:
self._timer_task.cancel()
self._timer_task = None
self._execute_at_end_of_timer = False
@callback
def _schedule_timer(self) -> None:
"""Schedule a timer."""
self._timer_task = self.hass.loop.call_later(
self.cooldown,
lambda: self.hass.async_create_task(self._handle_timer_finish()),
)
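# Usage sketch (assumes a running Home Assistant instance; names are
# illustrative):
#
#     async def _refresh() -> None:
#         ...  # expensive update work
#
#     debouncer = Debouncer(hass, logger, cooldown=10.0, immediate=True,
#                           function=_refresh)
#     await debouncer.async_call()   # runs now; calls within 10s coalesce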
| jawilson/home-assistant | homeassistant/helpers/debounce.py | Python | apache-2.0 | 3,910 |
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from six import text_type
from zerver.models import get_client, UserProfile, Client
from zerver.decorator import asynchronous, \
authenticated_json_post_view, internal_notify_view, RespondAsynchronously, \
has_request_variables, REQ, _RespondAsynchronously
from zerver.lib.response import json_success, json_error
from zerver.lib.validator import check_bool, check_list, check_string
from zerver.lib.event_queue import get_client_descriptor, \
process_notification, fetch_events
from django.core.handlers.base import BaseHandler
from typing import Union, Optional, Iterable, Sequence, List
import time
import ujson
@internal_notify_view
def notify(request):
# type: (HttpRequest) -> HttpResponse
process_notification(ujson.loads(request.POST['data']))
return json_success()
@has_request_variables
def cleanup_event_queue(request, user_profile, queue_id=REQ()):
# type: (HttpRequest, UserProfile, text_type) -> HttpResponse
client = get_client_descriptor(str(queue_id))
if client is None:
return json_error(_("Bad event queue id: %s") % (queue_id,))
if user_profile.id != client.user_profile_id:
return json_error(_("You are not authorized to access this queue"))
request._log_data['extra'] = "[%s]" % (queue_id,)
client.cleanup()
return json_success()
@authenticated_json_post_view
def json_get_events(request, user_profile):
# type: (HttpRequest, UserProfile) -> Union[HttpResponse, _RespondAsynchronously]
return get_events_backend(request, user_profile, apply_markdown=True)
@asynchronous
@has_request_variables
def get_events_backend(request, user_profile, handler,
user_client = REQ(converter=get_client, default=None),
last_event_id = REQ(converter=int, default=None),
queue_id = REQ(default=None),
apply_markdown = REQ(default=False, validator=check_bool),
all_public_streams = REQ(default=False, validator=check_bool),
event_types = REQ(default=None, validator=check_list(check_string)),
dont_block = REQ(default=False, validator=check_bool),
narrow = REQ(default=[], validator=check_list(None)),
lifespan_secs = REQ(default=0, converter=int)):
# type: (HttpRequest, UserProfile, BaseHandler, Optional[Client], Optional[int], Optional[List[text_type]], bool, bool, Optional[text_type], bool, Iterable[Sequence[text_type]], int) -> Union[HttpResponse, _RespondAsynchronously]
if user_client is None:
user_client = request.client
events_query = dict(
user_profile_id = user_profile.id,
user_profile_email = user_profile.email,
queue_id = queue_id,
last_event_id = last_event_id,
event_types = event_types,
client_type_name = user_client.name,
all_public_streams = all_public_streams,
lifespan_secs = lifespan_secs,
narrow = narrow,
dont_block = dont_block,
handler_id = handler.handler_id)
if queue_id is None:
events_query['new_queue_data'] = dict(
user_profile_id = user_profile.id,
realm_id = user_profile.realm.id,
user_profile_email = user_profile.email,
event_types = event_types,
client_type_name = user_client.name,
apply_markdown = apply_markdown,
all_public_streams = all_public_streams,
queue_timeout = lifespan_secs,
last_connection_time = time.time(),
narrow = narrow)
result = fetch_events(events_query)
if "extra_log_data" in result:
request._log_data['extra'] = result["extra_log_data"]
if result["type"] == "async":
handler._request = request
return RespondAsynchronously
if result["type"] == "error":
return json_error(result["message"])
return json_success(result["response"])
| umkay/zulip | zerver/tornadoviews.py | Python | apache-2.0 | 4,131 |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from taskflow.patterns import linear_flow as lf
from taskflow import task
from glance import i18n
_ = i18n._
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
LOG = logging.getLogger(__name__)
convert_task_opts = [
cfg.StrOpt('conversion_format',
default=None,
choices=('qcow2', 'raw', 'vmdk'),
help=_("The format to which images will be automatically "
"converted.")),
]
CONF = cfg.CONF
# NOTE(flaper87): Registering under the taskflow_executor section
# for now. It seems a waste to have a whole section dedicated to a
# single task with a single option.
CONF.register_opts(convert_task_opts, group='taskflow_executor')
class _Convert(task.Task):
conversion_missing_warned = False
def __init__(self, task_id, task_type, image_repo):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
super(_Convert, self).__init__(
name='%s-Convert-%s' % (task_type, task_id))
def execute(self, image_id, file_path):
# NOTE(flaper87): A format must be explicitly
# specified. There's no "sane" default for this
# because the dest format may work differently depending
# on the environment OpenStack is running in.
conversion_format = CONF.taskflow_executor.conversion_format
if conversion_format is None:
if not _Convert.conversion_missing_warned:
msg = (_LW('The conversion format is None, please add a value '
'for it in the config file for this task to '
'work: %s') %
self.task_id)
LOG.warn(msg)
_Convert.conversion_missing_warned = True
return
# TODO(flaper87): Check whether the image is in the desired
# format already. Probably using `qemu-img` just like the
# `Introspection` task.
dest_path = os.path.join(CONF.task.work_dir, "%s.converted" % image_id)
stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O',
conversion_format, file_path, dest_path,
log_errors=putils.LOG_ALL_ERRORS)
if stderr:
raise RuntimeError(stderr)
os.rename(dest_path, file_path.split("file://")[-1])
return file_path
def revert(self, image_id, result=None, **kwargs):
# NOTE(flaper87): If result is None, it probably
# means this task failed. Otherwise, we would have
# a result from its execution.
if result is None:
return
fs_path = result.split("file://")[-1]
if os.path.exists(fs_path):
            os.remove(fs_path)
def get_flow(**kwargs):
"""Return task flow for converting images to different formats.
:param task_id: Task ID.
:param task_type: Type of the task.
:param image_repo: Image repository used.
"""
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
image_repo = kwargs.get('image_repo')
return lf.Flow(task_type).add(
_Convert(task_id, task_type, image_repo),
)
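# Illustrative usage (not part of the original module): a minimal sketch of
# loading and running this flow with a taskflow engine. The task ids and the
# store values below are made-up examples.
#
# from taskflow import engines
#
# flow = get_flow(task_id='42', task_type='import', image_repo=None)
# engine = engines.load(flow, store={'image_id': 'abc123',
#                                    'file_path': 'file:///tmp/abc123'})
# engine.run()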
| saeki-masaki/glance | glance/async/flows/convert.py | Python | apache-2.0 | 3,953 |
#
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Gluster storage class.
This class is very similar to FileStorage, given that Gluster when mounted
behaves essentially like a regular file system. Unlike RBD, there are no
special provisions for block device abstractions (yet).
"""
import logging
import os
import socket
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import constants
from ganeti import ssconf
from ganeti.utils import io
from ganeti.storage import base
from ganeti.storage.filestorage import FileDeviceHelper
class GlusterVolume(object):
"""This class represents a Gluster volume.
Volumes are uniquely identified by:
- their IP address
- their port
- the volume name itself
Two GlusterVolume objects x, y with same IP address, port and volume name
are considered equal.
"""
def __init__(self, server_addr, port, volume, _run_cmd=utils.RunCmd,
_mount_point=None):
"""Creates a Gluster volume object.
@type server_addr: str
@param server_addr: The address to connect to
@type port: int
@param port: The port to connect to (Gluster standard is 24007)
@type volume: str
@param volume: The gluster volume to use for storage.
"""
self.server_addr = server_addr
server_ip = netutils.Hostname.GetIP(self.server_addr)
self._server_ip = server_ip
port = netutils.ValidatePortNumber(port)
self._port = port
self._volume = volume
if _mount_point: # tests
self.mount_point = _mount_point
else:
self.mount_point = ssconf.SimpleStore().GetGlusterStorageDir()
self._run_cmd = _run_cmd
@property
def server_ip(self):
return self._server_ip
@property
def port(self):
return self._port
@property
def volume(self):
return self._volume
def __eq__(self, other):
return (self.server_ip, self.port, self.volume) == \
(other.server_ip, other.port, other.volume)
def __repr__(self):
return """GlusterVolume("{ip}", {port}, "{volume}")""" \
.format(ip=self.server_ip, port=self.port, volume=self.volume)
def __hash__(self):
return (self.server_ip, self.port, self.volume).__hash__()
def _IsMounted(self):
"""Checks if we are mounted or not.
@rtype: bool
@return: True if this volume is mounted.
"""
if not os.path.exists(self.mount_point):
return False
return os.path.ismount(self.mount_point)
def _GuessMountFailReasons(self):
"""Try and give reasons why the mount might've failed.
@rtype: str
@return: A semicolon-separated list of problems found with the current setup
suitable for display to the user.
"""
reasons = []
# Does the mount point exist?
if not os.path.exists(self.mount_point):
reasons.append("%r: does not exist" % self.mount_point)
# Okay, it exists, but is it a directory?
elif not os.path.isdir(self.mount_point):
reasons.append("%r: not a directory" % self.mount_point)
# If, for some unfortunate reason, this folder exists before mounting:
#
# /var/run/ganeti/gluster/gv0/10.0.0.1:30000:gv0/
# '--------- cwd ------------'
#
# and you _are_ trying to mount the gluster volume gv0 on 10.0.0.1:30000,
# then the mount.glusterfs command parser gets confused and this command:
#
# mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
# '-- remote end --' '------ mountpoint -------'
#
# gets parsed instead like this:
#
# mount -t glusterfs 10.0.0.1:30000:gv0 /var/run/ganeti/gluster/gv0
# '-- mountpoint --' '----- syntax error ------'
#
# and if there _is_ a gluster server running locally at the default remote
# end, localhost:24007, then this is not a network error and therefore... no
# usage message gets printed out. All you get is a Byson parser error in the
# gluster log files about an unexpected token in line 1, "". (That's stdin.)
#
# Not that we rely on that output in any way whatsoever...
parser_confusing = io.PathJoin(self.mount_point,
self._GetFUSEMountString())
if os.path.exists(parser_confusing):
reasons.append("%r: please delete, rename or move." % parser_confusing)
# Let's try something else: can we connect to the server?
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.server_ip, self.port))
sock.close()
except socket.error as err:
reasons.append("%s:%d: %s" % (self.server_ip, self.port, err.strerror))
reasons.append("try running 'gluster volume info %s' on %s to ensure"
" it exists, it is started and it is using the tcp"
" transport" % (self.volume, self.server_ip))
return "; ".join(reasons)
def _GetFUSEMountString(self):
"""Return the string FUSE needs to mount this volume.
@rtype: str
"""
return "-o server-port={port} {ip}:/{volume}" \
.format(port=self.port, ip=self.server_ip, volume=self.volume)
def GetKVMMountString(self, path):
"""Return the string KVM needs to use this volume.
@rtype: str
"""
ip = self.server_ip
if netutils.IPAddress.GetAddressFamily(ip) == socket.AF_INET6:
ip = "[%s]" % ip
return "gluster://{ip}:{port}/{volume}/{path}" \
.format(ip=ip, port=self.port, volume=self.volume, path=path)
def Mount(self):
"""Try and mount the volume. No-op if the volume is already mounted.
@raises BlockDeviceError: if the mount was unsuccessful
@rtype: context manager
@return: A simple context manager that lets you use this volume for
short lived operations like so::
with volume.mount():
# Do operations on volume
# Volume is now unmounted
"""
class _GlusterVolumeContextManager(object):
def __init__(self, volume):
self.volume = volume
def __enter__(self):
# We're already mounted.
return self
def __exit__(self, *exception_information):
self.volume.Unmount()
return False # do not swallow exceptions.
if self._IsMounted():
return _GlusterVolumeContextManager(self)
command = ["mount",
"-t", "glusterfs",
self._GetFUSEMountString(),
self.mount_point]
io.Makedirs(self.mount_point)
self._run_cmd(" ".join(command),
# Why set cwd? Because it's an area we control. If,
# for some unfortunate reason, this folder exists:
# "/%s/" % _GetFUSEMountString()
# ...then the gluster parser gets confused and treats
# _GetFUSEMountString() as your mount point and
# self.mount_point becomes a syntax error.
cwd=self.mount_point)
# mount.glusterfs exits with code 0 even after failure.
# https://bugzilla.redhat.com/show_bug.cgi?id=1031973
if not self._IsMounted():
reasons = self._GuessMountFailReasons()
if not reasons:
reasons = "%r failed." % (" ".join(command))
base.ThrowError("%r: mount failure: %s",
self.mount_point,
reasons)
return _GlusterVolumeContextManager(self)
def Unmount(self):
"""Try and unmount the volume.
Failures are logged but otherwise ignored.
@raises BlockDeviceError: if the volume was not mounted to begin with.
"""
if not self._IsMounted():
base.ThrowError("%r: should be mounted but isn't.", self.mount_point)
result = self._run_cmd(["umount",
self.mount_point])
if result.failed:
logging.warning("Failed to unmount %r from %r: %s",
self, self.mount_point, result.fail_reason)
class GlusterStorage(base.BlockDev):
"""File device using the Gluster backend.
This class represents a file storage backend device stored on Gluster. Ganeti
mounts and unmounts the Gluster devices automatically.
The unique_id for the file device is a (file_driver, file_path) tuple.
"""
def __init__(self, unique_id, children, size, params, dyn_params, **kwargs):
"""Initalizes a file device backend.
"""
if children:
base.ThrowError("Invalid setup for file device")
try:
self.driver, self.path = unique_id
except ValueError: # wrong number of arguments
raise ValueError("Invalid configuration data %s" % repr(unique_id))
server_addr = params[constants.GLUSTER_HOST]
port = params[constants.GLUSTER_PORT]
volume = params[constants.GLUSTER_VOLUME]
self.volume = GlusterVolume(server_addr, port, volume)
self.full_path = io.PathJoin(self.volume.mount_point, self.path)
self.file = None
super(GlusterStorage, self).__init__(unique_id, children, size,
params, dyn_params, **kwargs)
self.Attach()
def Assemble(self):
"""Assemble the device.
Checks whether the file device exists, raises BlockDeviceError otherwise.
"""
assert self.attached, "Gluster file assembled without being attached"
self.file.Exists(assert_exists=True)
def Shutdown(self):
"""Shutdown the device.
"""
self.file = None
self.dev_path = None
self.attached = False
def Open(self, force=False, exclusive=True):
"""Make the device ready for I/O.
This is a no-op for the file type.
"""
assert self.attached, "Gluster file opened without being attached"
def Close(self):
"""Notifies that the device will no longer be used for I/O.
This is a no-op for the file type.
"""
pass
def Remove(self):
"""Remove the file backing the block device.
@rtype: boolean
@return: True if the removal was successful
"""
with self.volume.Mount():
self.file = FileDeviceHelper(self.full_path)
if self.file.Remove():
self.file = None
return True
else:
return False
def Rename(self, new_id):
"""Renames the file.
"""
# TODO: implement rename for file-based storage
base.ThrowError("Rename is not supported for Gluster storage")
def Grow(self, amount, dryrun, backingstore, excl_stor):
"""Grow the file
@param amount: the amount (in mebibytes) to grow with
"""
self.file.Grow(amount, dryrun, backingstore, excl_stor)
def Attach(self, **kwargs):
"""Attach to an existing file.
Check if this file already exists.
@rtype: boolean
@return: True if file exists
"""
try:
self.volume.Mount()
self.file = FileDeviceHelper(self.full_path)
self.dev_path = self.full_path
except Exception as err:
self.volume.Unmount()
raise err
self.attached = self.file.Exists()
return self.attached
def GetActualSize(self):
"""Return the actual disk size.
@note: the device needs to be active when this is called
"""
return self.file.Size()
def GetUserspaceAccessUri(self, hypervisor):
"""Generate KVM userspace URIs to be used as `-drive file` settings.
@see: L{BlockDev.GetUserspaceAccessUri}
@see: https://github.com/qemu/qemu/commit/8d6d89cb63c57569864ecdeb84d3a1c2eb
"""
if hypervisor == constants.HT_KVM:
return self.volume.GetKVMMountString(self.path)
else:
base.ThrowError("Hypervisor %s doesn't support Gluster userspace access" %
hypervisor)
@classmethod
def Create(cls, unique_id, children, size, spindles, params, excl_stor,
dyn_params, **kwargs):
"""Create a new file.
@param size: the size of file in MiB
@rtype: L{bdev.FileStorage}
@return: an instance of FileStorage
"""
if excl_stor:
raise errors.ProgrammerError("FileStorage device requested with"
" exclusive_storage")
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
full_path = unique_id[1]
server_addr = params[constants.GLUSTER_HOST]
port = params[constants.GLUSTER_PORT]
volume = params[constants.GLUSTER_VOLUME]
volume_obj = GlusterVolume(server_addr, port, volume)
full_path = io.PathJoin(volume_obj.mount_point, full_path)
# Possible optimization: defer actual creation to first Attach, rather
# than mounting and unmounting here, then remounting immediately after.
with volume_obj.Mount():
FileDeviceHelper.CreateFile(full_path, size, create_folders=True)
return GlusterStorage(unique_id, children, size, params, dyn_params,
**kwargs)
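# Illustrative usage (not part of the original module). The address, port and
# volume are made up, _mount_point is overridden as the unit tests do, and the
# literal address is assumed to resolve to itself:
#
# >>> a = GlusterVolume("10.0.0.1", 24007, "gv0", _mount_point="/mnt/gv0")
# >>> b = GlusterVolume("10.0.0.1", 24007, "gv0", _mount_point="/mnt/other")
# >>> a == b                     # identity is (ip, port, volume) only
# True
# >>> a._GetFUSEMountString()
# '-o server-port=24007 10.0.0.1:/gv0'
# >>> a.GetKVMMountString("disk0")
# 'gluster://10.0.0.1:24007/gv0/disk0'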
| andir/ganeti | lib/storage/gluster.py | Python | bsd-2-clause | 14,117 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
from trac.config import ConfigSection
from trac.perm import IPermissionRequestor
class ExtraPermissionsProvider(Component):
"""Extra permission provider."""
implements(IPermissionRequestor)
extra_permissions_section = ConfigSection('extra-permissions',
doc="""This section provides a way to add arbitrary permissions to a
Trac environment. This can be useful for adding new permissions to use
for workflow actions, for example.
To add new permissions, create a new section `[extra-permissions]` in
your `trac.ini`. Every entry in that section defines a meta-permission
and a comma-separated list of permissions. For example:
{{{
[extra-permissions]
extra_admin = extra_view, extra_modify, extra_delete
}}}
This entry will define three new permissions `EXTRA_VIEW`,
`EXTRA_MODIFY` and `EXTRA_DELETE`, as well as a meta-permissions
`EXTRA_ADMIN` that grants all three permissions.
If you don't want a meta-permission, start the meta-name with an
underscore (`_`):
{{{
[extra-permissions]
_perms = extra_view, extra_modify
}}}
""")
def get_permission_actions(self):
permissions = {}
for meta, perms in self.extra_permissions_section.options():
perms = [each.strip().upper() for each in perms.split(',')]
for perm in perms:
permissions.setdefault(perm, [])
meta = meta.strip().upper()
if meta and not meta.startswith('_'):
permissions.setdefault(meta, []).extend(perms)
return [(k, v) if v else k for k, v in permissions.iteritems()]
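# For illustration (configuration values made up): with
#
#     [extra-permissions]
#     extra_admin = extra_view, extra_modify
#     _perms = extra_other
#
# get_permission_actions() returns, in some order:
#
#     ['EXTRA_VIEW', 'EXTRA_MODIFY', 'EXTRA_OTHER',
#      ('EXTRA_ADMIN', ['EXTRA_VIEW', 'EXTRA_MODIFY'])]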
| trac-ja/trac-ja | tracopt/perm/config_perm_provider.py | Python | bsd-3-clause | 2,233 |
"""
=============================================
Effect of varying threshold for self-training
=============================================
This example illustrates the effect of a varying threshold on self-training.
The `breast_cancer` dataset is loaded, and labels are deleted such that only 50
out of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this
dataset, with varying thresholds.
The upper graph shows the amount of labeled samples that the classifier has
available by the end of fit, and the accuracy of the classifier. The lower
graph shows the last iteration in which a sample was labeled. All values are
cross validated with 3 folds.
At low thresholds (in [0.4, 0.5]), the classifier learns from samples that were
labeled with a low confidence. These low-confidence samples are likely to have
incorrect predicted labels, and as a result, fitting on these incorrect labels
produces a poor accuracy. Note that the classifier labels almost all of the
samples, and only takes one iteration.
For very high thresholds (in [0.9, 1)) we observe that the classifier does not
augment its dataset (the amount of self-labeled samples is 0). As a result, the
accuracy achieved with a threshold of 0.9999 is the same as a normal supervised
classifier would achieve.
The optimal accuracy lies in between both of these extremes at a threshold of
around 0.7.
"""
# Authors: Oliver Rausch <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
n_splits = 3
X, y = datasets.load_breast_cancer(return_X_y=True)
X, y = shuffle(X, y, random_state=42)
y_true = y.copy()
y[50:] = -1
total_samples = y.shape[0]
base_classifier = SVC(probability=True, gamma=0.001, random_state=42)
x_values = np.arange(0.4, 1.05, 0.05)
x_values = np.append(x_values, 0.99999)
scores = np.empty((x_values.shape[0], n_splits))
amount_labeled = np.empty((x_values.shape[0], n_splits))
amount_iterations = np.empty((x_values.shape[0], n_splits))
for (i, threshold) in enumerate(x_values):
self_training_clf = SelfTrainingClassifier(base_classifier, threshold=threshold)
# We need manual cross validation so that we don't treat -1 as a separate
# class when computing accuracy
skfolds = StratifiedKFold(n_splits=n_splits)
for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
y_test_true = y_true[test_index]
self_training_clf.fit(X_train, y_train)
        # The number of labeled samples at the end of fitting
amount_labeled[i, fold] = (
total_samples
- np.unique(self_training_clf.labeled_iter_, return_counts=True)[1][0]
)
# The last iteration the classifier labeled a sample in
amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_)
y_pred = self_training_clf.predict(X_test)
scores[i, fold] = accuracy_score(y_test_true, y_pred)
ax1 = plt.subplot(211)
ax1.errorbar(
x_values, scores.mean(axis=1), yerr=scores.std(axis=1), capsize=2, color="b"
)
ax1.set_ylabel("Accuracy", color="b")
ax1.tick_params("y", colors="b")
ax2 = ax1.twinx()
ax2.errorbar(
x_values,
amount_labeled.mean(axis=1),
yerr=amount_labeled.std(axis=1),
capsize=2,
color="g",
)
ax2.set_ylim(bottom=0)
ax2.set_ylabel("Amount of labeled samples", color="g")
ax2.tick_params("y", colors="g")
ax3 = plt.subplot(212, sharex=ax1)
ax3.errorbar(
x_values,
amount_iterations.mean(axis=1),
yerr=amount_iterations.std(axis=1),
capsize=2,
color="b",
)
ax3.set_ylim(bottom=0)
ax3.set_ylabel("Amount of iterations")
ax3.set_xlabel("Threshold")
plt.show()
| manhhomienbienthuy/scikit-learn | examples/semi_supervised/plot_self_training_varying_threshold.py | Python | bsd-3-clause | 4,008 |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import imp
import os
import platform
import re
import subprocess
import sys
from . import option_list
from digits import device_query
from digits.utils import parse_version
def load_from_envvar(envvar):
"""
Load information from an installation indicated by an environment variable
"""
value = os.environ[envvar].strip().strip("\"' ")
if platform.system() == 'Windows':
executable_dir = os.path.join(value, 'install', 'bin')
python_dir = os.path.join(value, 'install', 'python')
else:
executable_dir = os.path.join(value, 'build', 'tools')
python_dir = os.path.join(value, 'python')
try:
executable = find_executable_in_dir(executable_dir)
if executable is None:
raise ValueError('Caffe executable not found at "%s"'
% executable_dir)
if not is_pycaffe_in_dir(python_dir):
raise ValueError('Pycaffe not found in "%s"'
% python_dir)
import_pycaffe(python_dir)
version, flavor = get_version_and_flavor(executable)
except:
print ('"%s" from %s does not point to a valid installation of Caffe.'
% (value, envvar))
print 'Use the envvar CAFFE_ROOT to indicate a valid installation.'
raise
return executable, version, flavor
def load_from_path():
"""
Load information from an installation on standard paths (PATH and PYTHONPATH)
"""
try:
executable = find_executable_in_dir()
if executable is None:
raise ValueError('Caffe executable not found in PATH')
if not is_pycaffe_in_dir():
raise ValueError('Pycaffe not found in PYTHONPATH')
import_pycaffe()
version, flavor = get_version_and_flavor(executable)
except:
print 'A valid Caffe installation was not found on your system.'
print 'Use the envvar CAFFE_ROOT to indicate a valid installation.'
raise
return executable, version, flavor
def find_executable_in_dir(dirname=None):
"""
Returns the path to the caffe executable at dirname
If dirname is None, search all directories in sys.path
Returns None if not found
"""
if platform.system() == 'Windows':
exe_name = 'caffe.exe'
else:
exe_name = 'caffe'
if dirname is None:
dirnames = [path.strip("\"' ") for path in os.environ['PATH'].split(os.pathsep)]
else:
dirnames = [dirname]
for dirname in dirnames:
path = os.path.join(dirname, exe_name)
if os.path.isfile(path) and os.access(path, os.X_OK):
return path
return None
def is_pycaffe_in_dir(dirname=None):
"""
Returns True if you can "import caffe" from dirname
If dirname is None, search all directories in sys.path
"""
old_path = sys.path
if dirname is not None:
sys.path = [dirname] # temporarily replace sys.path
try:
imp.find_module('caffe')
except ImportError:
return False
finally:
sys.path = old_path
return True
def import_pycaffe(dirname=None):
"""
Imports caffe
If dirname is not None, prepend it to sys.path first
"""
if dirname is not None:
sys.path.insert(0, dirname)
# Add to PYTHONPATH so that build/tools/caffe is aware of python layers there
os.environ['PYTHONPATH'] = '%s%s%s' % (
            dirname, os.pathsep, os.environ.get('PYTHONPATH', ''))
# Suppress GLOG output for python bindings
GLOG_minloglevel = os.environ.pop('GLOG_minloglevel', None)
# Show only "ERROR" and "FATAL"
os.environ['GLOG_minloglevel'] = '2'
# for Windows environment, loading h5py before caffe solves the issue mentioned in
# https://github.com/NVIDIA/DIGITS/issues/47#issuecomment-206292824
import h5py # noqa
try:
import caffe
except ImportError:
print 'Did you forget to "make pycaffe"?'
raise
# Strange issue with protocol buffers and pickle - see issue #32
sys.path.insert(0, os.path.join(
os.path.dirname(caffe.__file__), 'proto'))
# Turn GLOG output back on for subprocess calls
if GLOG_minloglevel is None:
del os.environ['GLOG_minloglevel']
else:
os.environ['GLOG_minloglevel'] = GLOG_minloglevel
def get_version_and_flavor(executable):
"""
Returns (version, flavor)
Should be called after import_pycaffe()
"""
version_string = get_version_from_pycaffe()
if version_string is None:
version_string = get_version_from_cmdline(executable)
if version_string is None:
version_string = get_version_from_soname(executable)
if version_string is None:
raise ValueError('Could not find version information for Caffe build ' +
'at "%s". Upgrade your installation' % executable)
version = parse_version(version_string)
if parse_version(0, 99, 0) > version > parse_version(0, 9, 0):
flavor = 'NVIDIA'
minimum_version = '0.11.0'
if version < parse_version(minimum_version):
raise ValueError(
'Required version "%s" is greater than "%s". Upgrade your installation.'
% (minimum_version, version_string))
else:
flavor = 'BVLC'
return version_string, flavor
def get_version_from_pycaffe():
try:
from caffe import __version__ as version
return version
except ImportError:
return None
def get_version_from_cmdline(executable):
command = [executable, '-version']
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait():
print p.stderr.read().strip()
raise RuntimeError('"%s" returned error code %s' % (command, p.returncode))
pattern = 'version'
for line in p.stdout:
if pattern in line:
return line[line.find(pattern) + len(pattern) + 1:].strip()
return None
def get_version_from_soname(executable):
command = ['ldd', executable]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait():
print p.stderr.read().strip()
raise RuntimeError('"%s" returned error code %s' % (command, p.returncode))
# Search output for caffe library
libname = 'libcaffe'
caffe_line = None
for line in p.stdout:
if libname in line:
caffe_line = line
break
if caffe_line is None:
raise ValueError('libcaffe not found in linked libraries for "%s"'
% executable)
# Read the symlink for libcaffe from ldd output
symlink = caffe_line.split()[2]
filename = os.path.basename(os.path.realpath(symlink))
# parse the version string
match = re.match(r'%s(.*)\.so\.(\S+)$' % (libname), filename)
if match:
return match.group(2)
else:
return None
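# Illustrative (filename made up): for a library named "libcaffe-nv.so.0.15.13"
# the pattern above matches with match.group(1) == '-nv' and
# match.group(2) == '0.15.13', so get_version_from_soname() returns '0.15.13'.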
if 'CAFFE_ROOT' in os.environ:
executable, version, flavor = load_from_envvar('CAFFE_ROOT')
elif 'CAFFE_HOME' in os.environ:
executable, version, flavor = load_from_envvar('CAFFE_HOME')
else:
executable, version, flavor = load_from_path()
option_list['caffe'] = {
'executable': executable,
'version': version,
'flavor': flavor,
'multi_gpu': (flavor == 'BVLC' or parse_version(version) >= parse_version(0, 12)),
'cuda_enabled': (len(device_query.get_devices()) > 0),
}
| ethantang95/DIGITS | digits/config/caffe.py | Python | bsd-3-clause | 7,545 |
import threading
import time
from . import _impl
from .common import *
from .connection import *
from .networktablenode import NetworkTableNode
from .type import NetworkTableEntryTypeManager
import logging
logger = logging.getLogger('nt')
__all__ = ["NetworkTableServer"]
class ServerConnectionState:
"""Represents the state of a connection to the server
"""
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
# represents that the server has received the connection from the client but
# has not yet received the client hello
GOT_CONNECTION_FROM_CLIENT = ServerConnectionState("GOT_CONNECTION_FROM_CLIENT")
# represents that the client is in a connected non-error state
CONNECTED_TO_CLIENT = ServerConnectionState("CONNECTED_TO_CLIENT")
# represents that the client has disconnected from the server
CLIENT_DISCONNECTED = ServerConnectionState("CLIENT_DISCONNECTED")
class ServerError(ServerConnectionState):
"""Represents that the client is in an error state
"""
def __init__(self, e):
"""Create a new error state
:param e:
"""
ServerConnectionState.__init__(self, "SERVER_ERROR")
self.e = e
def getException(self):
""":returns: the exception that caused the client connection to enter
an error state
"""
return self.e
def __str__(self):
return "SERVER_ERROR: %s" % self.e
class ServerConnectionAdapter:
"""Object that adapts messages from a client to the server
"""
def gotoState(self, newState):
if self.connectionState != newState:
logger.info("%s entered connection state: %s", self, newState)
self.connectionState = newState
def __init__(self, stream, entryStore, adapterListener, typeManager):
"""Create a server connection adapter for a given stream
:param stream:
:param entryStore:
:param adapterListener:
"""
self.connection = NetworkTableConnection(stream, typeManager)
self.entryStore = entryStore
self.adapterListener = adapterListener
self.connectionState = None
self.gotoState(GOT_CONNECTION_FROM_CLIENT)
self.readManager = ReadManager(self,
self.connection, name="Server Connection Reader Thread")
self.readManager.start()
def __str__(self):
return 'Server 0x%08x' % id(self)
def badMessage(self, e):
self.gotoState(ServerError(e))
self.adapterListener.close(self, True)
def ioError(self, e):
if isinstance(e, StreamEOF):
self.gotoState(CLIENT_DISCONNECTED)
else:
self.gotoState(ServerError(e))
self.adapterListener.close(self, False)
def shutdown(self, closeStream):
"""stop the read thread and close the stream
"""
self.readManager.stop()
if closeStream:
self.connection.close()
def keepAlive(self):
pass # just let it happen
def clientHello(self, protocolRevision):
if self.connectionState != GOT_CONNECTION_FROM_CLIENT:
raise BadMessageError("A server should not receive a client hello after it has already connected/entered an error state")
if protocolRevision != PROTOCOL_REVISION:
self.connection.sendProtocolVersionUnsupported()
raise BadMessageError("Client Connected with bad protocol revision: 0x%x" % protocolRevision)
else:
self.entryStore.sendServerHello(self.connection)
self.gotoState(CONNECTED_TO_CLIENT)
def protocolVersionUnsupported(self, protocolRevision):
raise BadMessageError("A server should not receive a protocol version unsupported message")
def serverHelloComplete(self):
raise BadMessageError("A server should not receive a server hello complete message")
def offerIncomingAssignment(self, entry):
self.entryStore.offerIncomingAssignment(entry)
def offerIncomingUpdate(self, entry, sequenceNumber, value):
self.entryStore.offerIncomingUpdate(entry, sequenceNumber, value)
def getEntry(self, id):
return self.entryStore.getEntry(id)
def sendEntry(self, entryBytes):
try:
if self.connectionState == CONNECTED_TO_CLIENT:
self.connection.sendEntry(entryBytes)
except IOError as e:
self.ioError(e)
def flush(self):
try:
self.connection.flush()
except IOError as e:
self.ioError(e)
def getConnectionState(self):
""":returns: the state of the connection
"""
return self.connectionState
def ensureAlive(self):
try:
self.connection.sendKeepAlive()
except IOError as e:
self.ioError(e)
class ServerNetworkTableEntryStore(AbstractNetworkTableEntryStore):
"""The entry store for a {@link NetworkTableServer}
"""
def __init__(self, listenerManager):
"""Create a new Server entry store
:param listenerManager: the listener manager that fires events from
this entry store
"""
AbstractNetworkTableEntryStore.__init__(self, listenerManager)
self.nextId = 0
def addEntry(self, newEntry):
with self.entry_lock:
entry = self.namedEntries.get(newEntry.name)
if entry is None:
newEntry.setId(self.nextId)
self.nextId += 1
self.idEntries[newEntry.getId()] = newEntry
self.namedEntries[newEntry.name] = newEntry
return True
return False
def updateEntry(self, entry, sequenceNumber, value):
with self.entry_lock:
if entry.putValue(sequenceNumber, value):
return True
return False
def sendServerHello(self, connection):
"""Send all entries in the entry store as entry assignments in a
single transaction
:param connection:
"""
transaction = []
with self.entry_lock:
# Cannot use sendEntry while holding entry lock!
for entry in self.namedEntries.values():
transaction.append(entry.getAssignmentBytes())
for entry in transaction:
connection.sendEntry(entry)
connection.sendServerHelloComplete()
connection.flush()
class ServerConnectionList:
"""A list of connections that the server currently has
"""
def __init__(self):
self.connections = []
self.connectionsLock = _impl.create_rlock('server_conn_lock')
def add(self, connection):
"""Add a connection to the list
:param connection:
"""
with self.connectionsLock:
self.connections.append(connection)
def close(self, connectionAdapter, closeStream):
with self.connectionsLock:
try:
self.connections.remove(connectionAdapter)
except ValueError:
return
logger.info("Close: %s", connectionAdapter)
connectionAdapter.shutdown(closeStream)
def closeAll(self):
"""close all connections and remove them
"""
with self.connectionsLock:
for connection in self.connections:
logger.info("Close: %s", connection)
connection.shutdown(True)
del self.connections[:]
def sendEntry(self, entryBytes):
with self.connectionsLock:
for connection in self.connections:
connection.sendEntry(entryBytes)
def flush(self):
with self.connectionsLock:
for connection in self.connections:
connection.flush()
def ensureAlive(self):
with self.connectionsLock:
for connection in self.connections:
connection.ensureAlive()
class NetworkTableServer(NetworkTableNode):
"""A server node in NetworkTables 2.0
"""
def __init__(self, streamProvider):
"""Create a NetworkTable Server
:param streamProvider:
"""
NetworkTableNode.__init__(self, ServerNetworkTableEntryStore(self))
self.typeManager = NetworkTableEntryTypeManager()
self.streamProvider = streamProvider
self.connectionList = ServerConnectionList()
self.writeManager = WriteManager(self.connectionList, self.entryStore, None)
self.entryStore.setIncomingReceiver(self.writeManager)
self.entryStore.setOutgoingReceiver(self.writeManager)
# start incoming stream monitor
self.running = True
self.monitorThread = threading.Thread(target=self._incomingMonitor,
name="Server Incoming Stream Monitor Thread")
self.monitorThread.daemon = True
self.monitorThread.start()
# start write manager
self.writeManager.start()
def close(self):
try:
self.running = False
self.monitorThread.join()
self.writeManager.stop()
self.connectionList.closeAll()
            time.sleep(1)  # work around a bug where an error occurs in select if the socket server is closed before all sockets finish closing
self.streamProvider.close()
time.sleep(1)
except IOError as e:
logger.error("Error during close: %s", e)
def isConnected(self):
return True
def isServer(self):
return True
def _incomingMonitor(self):
while self.running:
try:
newStream = self.streamProvider.accept()
if newStream is not None:
connectionAdapter = ServerConnectionAdapter(newStream, self.entryStore, self.connectionList, self.typeManager)
self.connectionList.add(connectionAdapter)
except IOError:
                pass  # could not get a new stream for some reason; ignore and continue
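# Illustrative usage (not part of the original module): a server is normally
# built around a stream provider whose accept() yields new client streams.
# The provider class named below is hypothetical:
#
# >>> provider = SocketServerStreamProvider(port=1735)  # hypothetical helper
# >>> server = NetworkTableServer(provider)
# >>> server.isServer()
# True
# >>> server.close()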
| schmirob000/2016-Stronghold | src/org/usfirst/frc/team4915/stronghold/vision/jetson/imgExplore2/networktables2/server.py | Python | mit | 10,070 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import logging
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
"""
This module implements plotter for DOS and band structure.
"""
logger = logging.getLogger(__name__)
class PhononDosPlotter(object):
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos().
Args:
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, stack=False, sigma=None):
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies']
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
ppl.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Frequencies (THz)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
        ltext = leg.get_texts()  # all the text.Text instances in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
class PhononBSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a qpoint (the
x axis) and the labels (None if no label)
frequencies: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
lattice: The reciprocal lattice.
"""
distance = []
frequency = []
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
for i in range(self._nb_bands):
frequency[-1].append(
[self._bs.bands[i][j]
for j in range(b['start_index'], b['end_index'] + 1)])
return {'ticks': ticks, 'distances': distance, 'frequency': frequency,
'lattice': self._bs.lattice_rec.as_dict()}
def get_plot(self, ylim=None):
"""
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
"""
plt = pretty_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
try:
rc('text', usetex=True)
except:
# Fall back on non Tex if errored.
rc('text', usetex=False)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color='k')
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{Frequency\ (THz)}$'
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, ylim=None):
"""
Show the plot using matplotlib.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
"""
plt = self.get_plot(ylim)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.qpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter):
"""
plot two band structure for comparison. One is in red the other in blue.
The two band structures need to be defined on the same symmetry lines!
and the distance between symmetry lines is
the one of the band structure used to build the PhononBSPlotter
Args:
another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for q in self._bs.qpoints:
if q.label:
labels[q.label] = q.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.qpoints[b['start_index']].frac_coords,
self._bs.qpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
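# Illustrative usage (not part of the original module), assuming `bands` is a
# PhononBandStructureSymmLine obtained elsewhere (e.g. from a phonopy run):
#
# >>> bs_plotter = PhononBSPlotter(bands)
# >>> bs_plotter.save_plot("phonon_bs.eps", img_format="eps", ylim=(0, 20))
# >>> bs_plotter.plot_brillouin()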
| matk86/pymatgen | pymatgen/phonon/plotter.py | Python | mit | 15,693 |
# Change the following to True to run a much more comprehensive set of
# tests, which take considerably longer.
full_tests = False
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
test("}}{{")
test("{}-{}", 1, [4, 5])
test("{0}-{1}", 1, [4, 5])
test("{1}-{0}", 1, [4, 5])
test("{:x}", 1)
test("{!r}", 2)
test("{:x}", 0x10)
test("{!r}", "foo")
test("{!s}", "foo")
test("{0!r:>10s} {0!s:>10s}", "foo")
test("{:4b}", 10)
test("{:4c}", 48)
test("{:4d}", 123)
test("{:4n}", 123)
test("{:4o}", 123)
test("{:4x}", 123)
test("{:4X}", 123)
test("{:4,d}", 12345678)
test("{:#4b}", 10)
test("{:#4o}", 123)
test("{:#4x}", 123)
test("{:#4X}", 123)
test("{:#4d}", 0)
test("{:#4b}", 0)
test("{:#4o}", 0)
test("{:#4x}", 0)
test("{:#4X}", 0)
test("{:<6s}", "ab")
test("{:>6s}", "ab")
test("{:^6s}", "ab")
test("{:.1s}", "ab")
test("{: <6d}", 123)
test("{: <6d}", -123)
test("{:0<6d}", 123)
test("{:0<6d}", -123)
test("{:@<6d}", 123)
test("{:@<6d}", -123)
test("{:@< 6d}", 123)
test("{:@< 6d}", -123)
test("{:@<+6d}", 123)
test("{:@<+6d}", -123)
test("{:@<-6d}", 123)
test("{:@<-6d}", -123)
test("{:@>6d}", -123)
test("{:@<6d}", -123)
test("{:@=6d}", -123)
test("{:06d}", -123)
test("{:>20}", "foo")
test("{:^20}", "foo")
test("{:<20}", "foo")
# nested format specifiers
print("{:{}}".format(123, '#>10'))
print("{:{}{}{}}".format(123, '#', '>', '10'))
print("{0:{1}{2}}".format(123, '#>', '10'))
print("{text:{align}{width}}".format(text="foo", align="<", width=20))
print("{text:{align}{width}}".format(text="foo", align="^", width=10))
print("{text:{align}{width}}".format(text="foo", align=">", width=30))
print("{foo}/foo".format(foo="bar"))
print("{}".format(123, foo="bar"))
print("{}-{foo}".format(123, foo="bar"))
def test_fmt(conv, fill, alignment, sign, prefix, width, precision, type, arg):
fmt = '{'
if conv:
fmt += '!'
fmt += conv
fmt += ':'
if alignment:
fmt += fill
fmt += alignment
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
if fill == '0' and alignment == '=':
fmt = '{:'
fmt += sign
fmt += prefix
fmt += width
if precision:
fmt += '.'
fmt += precision
fmt += type
fmt += '}'
test(fmt, arg)
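# For example (illustrative), test_fmt('', '@', '<', '+', '', '6', '', 'd', 123)
# builds the format string '{:@<+6d}' and passes it to test() with 123.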
int_nums = (-1234, -123, -12, -1, 0, 1, 12, 123, 1234, True, False)
int_nums2 = (-12, -1, 0, 1, 12, True, False)
if full_tests:
for type in ('', 'b', 'd', 'o', 'x', 'X'):
for width in ('', '1', '3', '5', '7'):
for alignment in ('', '<', '>', '=', '^'):
for fill in ('', ' ', '0', '@'):
for sign in ('', '+', '-', ' '):
for prefix in ('', '#'):
for num in int_nums:
test_fmt('', fill, alignment, sign, prefix, width, '', type, num)
if full_tests:
for width in ('', '1', '2'):
for alignment in ('', '<', '>', '^'):
for fill in ('', ' ', '0', '@'):
test_fmt('', fill, alignment, '', '', width, '', 'c', 48)
if full_tests:
for conv in ('', 'r', 's'):
for width in ('', '1', '4', '10'):
for alignment in ('', '<', '>', '^'):
for fill in ('', ' ', '0', '@'):
for str in ('', 'a', 'bcd', 'This is a test with a longer string'):
test_fmt(conv, fill, alignment, '', '', width, '', 's', str)
# tests for errors in format string
try:
'{0:0}'.format('zzz')
except (ValueError):
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'}'.format('zzzz')
except ValueError:
print('ValueError')
# end of format parsing conversion specifier
try:
'{!'.format('a')
except ValueError:
print('ValueError')
# unknown conversion specifier
try:
'abc{!d}'.format('1')
except ValueError:
print('ValueError')
try:
'{abc'.format('zzzz')
except ValueError:
print('ValueError')
# expected ':' after specifier
try:
'{!s :}'.format(2)
except ValueError:
print('ValueError')
try:
'{}{0}'.format(1, 2)
except ValueError:
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'{ 0 :*^10}'.format(12)
except KeyError:
print('KeyError')
try:
'{0}{}'.format(1)
except ValueError:
print('ValueError')
try:
'{}{}'.format(1)
except IndexError:
print('IndexError')
try:
'{0:+s}'.format('1')
except ValueError:
print('ValueError')
try:
'{0:+c}'.format(1)
except ValueError:
print('ValueError')
try:
'{0:s}'.format(1)
except ValueError:
print('ValueError')
try:
'{:*"1"}'.format('zz')
except ValueError:
print('ValueError')
# unknown format code for str arg
try:
'{:X}'.format('zz')
except ValueError:
print('ValueError')
| adamkh/micropython | tests/basics/string_format.py | Python | mit | 4,990 |
"""
WSGI config for src project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")
application = get_wsgi_application()
| mezklador/drf1 | src/wsgi.py | Python | cc0-1.0 | 383 |
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2006 Ali Sabil <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import xml.sax.saxutils as xml
class LiveService(object):
CONTACTS = ("contacts.msn.com", "MBI")
MESSENGER = ("messenger.msn.com", "?id=507")
MESSENGER_CLEAR = ("messengerclear.live.com", "MBI_KEY_OLD")
MESSENGER_SECURE = ("messengersecure.live.com", "MBI_SSL")
SPACES = ("spaces.live.com", "MBI")
STORAGE = ("storage.msn.com", "MBI")
TB = ("http://Passport.NET/tb", None)
VOICE = ("voice.messenger.msn.com", "?id=69264")
@classmethod
def url_to_service(cls, url):
for attr_name in dir(cls):
if attr_name.startswith('_'):
continue
attr = getattr(cls, attr_name)
if isinstance(attr, tuple) and attr[0] == url:
return attr
return None
def transport_headers():
"""Returns a dictionary, containing transport (http) headers
to use for the request"""
return {}
def soap_action():
"""Returns the SOAPAction value to pass to the transport
or None if no SOAPAction needs to be specified"""
return None
def soap_header(account, password):
"""Returns the SOAP xml header"""
return """
<ps:AuthInfo xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" Id="PPAuthInfo">
<ps:HostingApp>{7108E71A-9926-4FCB-BCC9-9A9D3F32E423}</ps:HostingApp>
<ps:BinaryVersion>4</ps:BinaryVersion>
<ps:UIVersion>1</ps:UIVersion>
<ps:Cookies/>
<ps:RequestParams>AQAAAAIAAABsYwQAAAAxMDMz</ps:RequestParams>
</ps:AuthInfo>
<wsse:Security xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext">
<wsse:UsernameToken Id="user">
<wsse:Username>%(account)s</wsse:Username>
<wsse:Password>%(password)s</wsse:Password>
</wsse:UsernameToken>
</wsse:Security>""" % {'account': xml.escape(account),
'password': xml.escape(password)}
def soap_body(*tokens):
"""Returns the SOAP xml body"""
token_template = """
<wst:RequestSecurityToken xmlns:wst="http://schemas.xmlsoap.org/ws/2004/04/trust" Id="RST%(id)d">
<wst:RequestType>http://schemas.xmlsoap.org/ws/2004/04/security/trust/Issue</wst:RequestType>
<wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2002/12/policy">
<wsa:EndpointReference xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/03/addressing">
<wsa:Address>%(address)s</wsa:Address>
</wsa:EndpointReference>
</wsp:AppliesTo>
%(policy_reference)s
</wst:RequestSecurityToken>"""
policy_reference_template = """
<wsse:PolicyReference xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext" URI=%(uri)s/>"""
tokens = list(tokens)
if LiveService.TB in tokens:
tokens.remove(LiveService.TB)
assert(len(tokens) >= 1)
body = token_template % \
{'id': 0,
'address': xml.escape(LiveService.TB[0]),
'policy_reference': ''}
for id, token in enumerate(tokens):
if token[1] is not None:
policy_reference = policy_reference_template % \
{'uri': xml.quoteattr(token[1])}
else:
policy_reference = ""
t = token_template % \
{'id': id + 1,
'address': xml.escape(token[0]),
'policy_reference': policy_reference}
body += t
return '<ps:RequestMultipleSecurityTokens ' \
'xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" ' \
'Id="RSTS">%s</ps:RequestMultipleSecurityTokens>' % body
def process_response(soap_response):
body = soap_response.body
return body.findall("./wst:RequestSecurityTokenResponseCollection/" \
"wst:RequestSecurityTokenResponse")
| billiob/papyon | papyon/service/description/SingleSignOn/RequestMultipleSecurityTokens.py | Python | gpl-2.0 | 4,656 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsComposerEffects.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2012 by Dr. Horst Düster / Dr. Marco Hugentobler'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import qgis
from PyQt4.QtCore import QFileInfo
from PyQt4.QtXml import QDomDocument
from PyQt4.QtGui import (QPainter, QColor)
from qgis.core import (QgsComposerShape,
QgsRectangle,
QgsComposition,
QgsMapRenderer
)
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest,
expectedFailure
)
from qgscompositionchecker import QgsCompositionChecker
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsComposerEffects(TestCase):
def __init__(self, methodName):
"""Run once on class initialisation."""
unittest.TestCase.__init__(self, methodName)
# create composition
self.mMapRenderer = QgsMapRenderer()
self.mComposition = QgsComposition(self.mMapRenderer)
self.mComposition.setPaperSize(297, 210)
self.mComposerRect1 = QgsComposerShape(20, 20, 150, 100, self.mComposition)
self.mComposerRect1.setShapeType(QgsComposerShape.Rectangle)
self.mComposerRect1.setBackgroundColor(QColor.fromRgb(255, 150, 0))
self.mComposition.addComposerShape(self.mComposerRect1)
self.mComposerRect2 = QgsComposerShape(50, 50, 150, 100, self.mComposition)
self.mComposerRect2.setShapeType(QgsComposerShape.Rectangle)
self.mComposerRect2.setBackgroundColor(QColor.fromRgb(0, 100, 150))
self.mComposition.addComposerShape(self.mComposerRect2)
def testBlendModes(self):
"""Test that blend modes work for composer items."""
self.mComposerRect2.setBlendMode(QPainter.CompositionMode_Multiply)
checker = QgsCompositionChecker('composereffects_blend', self.mComposition)
myTestResult, myMessage = checker.testComposition()
self.mComposerRect2.setBlendMode(QPainter.CompositionMode_SourceOver)
        assert myTestResult, myMessage
def testTransparency(self):
"""Test that transparency works for composer items."""
        self.mComposerRect2.setTransparency(50)
checker = QgsCompositionChecker('composereffects_transparency', self.mComposition)
myTestResult, myMessage = checker.testComposition()
        self.mComposerRect2.setTransparency(100)
        assert myTestResult, myMessage
if __name__ == '__main__':
unittest.main()
| kiith-sa/QGIS | tests/src/python/test_qgscomposereffects.py | Python | gpl-2.0 | 3,073 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Person/_ChangedSince.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._changedsincebase import ChangedSinceBase
#-------------------------------------------------------------------------
#
# ChangedSince
#
#-------------------------------------------------------------------------
class ChangedSince(ChangedSinceBase):
"""Rule that checks for persons changed since a specific time."""
labels = [ _('Changed after:'), _('but before:') ]
name = _('Persons changed after <date time>')
description = _("Matches person records changed after a specified "
"date-time (yyyy-mm-dd hh:mm:ss) or in the range, if a second "
"date-time is given.")
| prculley/gramps | gramps/gen/filters/rules/person/_changedsince.py | Python | gpl-2.0 | 1,921 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: group
version_added: "0.0.2"
short_description: Add or remove groups
requirements:
- groupadd
- groupdel
- groupmod
description:
- Manage presence of groups on a host.
- For Windows targets, use the M(win_group) module instead.
options:
name:
description:
- Name of the group to manage.
type: str
required: true
gid:
description:
- Optional I(GID) to set for the group.
type: int
state:
description:
- Whether the group should be present or not on the remote host.
type: str
choices: [ absent, present ]
default: present
system:
description:
- If I(yes), indicates that the group created is a system group.
type: bool
default: no
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
- This is useful in environments that use centralized authentication when you want to manipulate the local groups.
(e.g. it uses C(lgroupadd) instead of C(groupadd)).
- This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
type: bool
default: no
version_added: "2.6"
non_unique:
description:
      - This option allows changing the group ID to a non-unique value. Requires C(gid).
- Not supported on macOS or BusyBox distributions.
type: bool
default: no
version_added: "2.8"
seealso:
- module: user
- module: win_group
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = '''
- name: Ensure group "somegroup" exists
group:
name: somegroup
state: present
- name: Ensure group "docker" exists with correct gid
group:
name: docker
state: present
gid: 1750
'''
RETURN = r'''
gid:
description: Group ID of the group.
returned: When C(state) is 'present'
type: int
sample: 1001
name:
description: Group name
returned: always
type: str
sample: users
state:
description: Whether the group is present or not
returned: always
type: str
sample: 'absent'
system:
description: Whether the group is a system group or not
returned: When C(state) is 'present'
type: bool
sample: False
'''
import grp
import os
from ansible.module_utils._text import to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.sys_info import get_platform_subclass
class Group(object):
"""
This is a generic Group manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- group_del()
- group_add()
- group_mod()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
GROUPFILE = '/etc/group'
    def __new__(cls, *args, **kwargs):
        # Instantiate the platform/distribution-specific subclass (e.g. SunOS,
        # FreeBsdGroup, AlpineGroup) that matches the target host.
        new_cls = get_platform_subclass(Group)
        return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.gid = module.params['gid']
self.system = module.params['system']
self.local = module.params['local']
self.non_unique = module.params['non_unique']
def execute_command(self, cmd):
return self.module.run_command(cmd)
def group_del(self):
if self.local:
command_name = 'lgroupdel'
else:
command_name = 'groupdel'
cmd = [self.module.get_bin_path(command_name, True), self.name]
return self.execute_command(cmd)
def _local_check_gid_exists(self):
if self.gid:
for gr in grp.getgrall():
if self.gid == gr.gr_gid and self.name != gr.gr_name:
self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name))
def group_add(self, **kwargs):
if self.local:
command_name = 'lgroupadd'
self._local_check_gid_exists()
else:
command_name = 'groupadd'
cmd = [self.module.get_bin_path(command_name, True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
elif key == 'system' and kwargs[key] is True:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
if self.local:
command_name = 'lgroupmod'
self._local_check_gid_exists()
else:
command_name = 'groupmod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self):
# The grp module does not distinguish between local and directory accounts.
        # Its output cannot be used to determine whether or not a group exists locally.
# It returns True if the group exists locally or in the directory, so instead
# look in the local GROUP file for an existing account.
if self.local:
if not os.path.exists(self.GROUPFILE):
self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE))
exists = False
name_test = '{0}:'.format(self.name)
with open(self.GROUPFILE, 'rb') as f:
reversed_lines = f.readlines()[::-1]
for line in reversed_lines:
if line.startswith(to_bytes(name_test)):
exists = True
break
if not exists:
self.module.warn(
"'local: true' specified and group was not found in {file}. "
"The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE))
return exists
else:
try:
if grp.getgrnam(self.name):
return True
except KeyError:
return False
def group_info(self):
if not self.group_exists():
return False
try:
info = list(grp.getgrnam(self.name))
except KeyError:
return False
return info
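    # Note (editor): grp.getgrnam() returns a struct in the order
    # (gr_name, gr_passwd, gr_gid, gr_mem), so the info[2] used by the
    # platform subclasses below is the numeric GID.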
# ===========================================
class SunOS(Group):
"""
This is a SunOS Group manipulation class. Solaris doesn't have
the 'system' group concept.
This overrides the following methods from the generic class:-
- group_add()
"""
platform = 'SunOS'
distribution = None
GROUPFILE = '/etc/group'
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class AIX(Group):
"""
    This is an AIX Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'AIX'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('rmgroup', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('mkgroup', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('id=' + str(kwargs[key]))
elif key == 'system' and kwargs[key] is True:
cmd.append('-a')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('chgroup', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('id=' + str(kwargs[key]))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
"""
This is a FreeBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'FreeBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:
return (0, '', '')
return self.execute_command(cmd)
return (None, '', '')
class DragonFlyBsdGroup(FreeBsdGroup):
"""
This is a DragonFlyBSD Group manipulation class.
It inherits all behaviors from FreeBsdGroup class.
"""
platform = 'DragonFly'
# ===========================================
class DarwinGroup(Group):
"""
    This is a macOS (Darwin) Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
    Group manipulation is done using dseditgroup(1).
"""
platform = 'Darwin'
distribution = None
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'create']
if self.gid is not None:
cmd += ['-i', str(self.gid)]
elif 'system' in kwargs and kwargs['system'] is True:
gid = self.get_lowest_available_system_gid()
if gid is not False:
self.gid = str(gid)
cmd += ['-i', str(self.gid)]
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_del(self):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'delete']
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_mod(self, gid=None):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'edit']
if gid is not None:
cmd += ['-i', str(gid)]
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
def get_lowest_available_system_gid(self):
# check for lowest available system gid (< 500)
try:
cmd = [self.module.get_bin_path('dscl', True)]
cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
(rc, out, err) = self.execute_command(cmd)
lines = out.splitlines()
highest = 0
for group_info in lines:
parts = group_info.split(' ')
if len(parts) > 1:
gid = int(parts[-1])
if gid > highest and gid < 500:
highest = gid
if highest == 0 or highest == 499:
return False
return (highest + 1)
except Exception:
return False
class OpenBsdGroup(Group):
"""
    This is an OpenBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'OpenBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
"""
This is a NetBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'NetBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class BusyBoxGroup(Group):
"""
BusyBox group manipulation class for systems that have addgroup and delgroup.
It overrides the following methods:
- group_add()
- group_del()
- group_mod()
"""
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('addgroup', True)]
if self.gid is not None:
cmd.extend(['-g', str(self.gid)])
if self.system:
cmd.append('-S')
cmd.append(self.name)
return self.execute_command(cmd)
def group_del(self):
cmd = [self.module.get_bin_path('delgroup', True), self.name]
return self.execute_command(cmd)
def group_mod(self, **kwargs):
# Since there is no groupmod command, modify /etc/group directly
info = self.group_info()
if self.gid is not None and self.gid != info[2]:
with open('/etc/group', 'rb') as f:
b_groups = f.read()
b_name = to_bytes(self.name)
b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
if b':%d:' % self.gid in b_groups:
self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
if self.module.check_mode:
return 0, '', ''
b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
with open('/etc/group', 'wb') as f:
f.write(b_new_groups)
return 0, '', ''
return None, '', ''
class AlpineGroup(BusyBoxGroup):
platform = 'Linux'
distribution = 'Alpine'
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True),
gid=dict(type='int'),
system=dict(type='bool', default=False),
local=dict(type='bool', default=False),
non_unique=dict(type='bool', default=False),
),
supports_check_mode=True,
required_if=[
['non_unique', True, ['gid']],
],
)
group = Group(module)
module.debug('Group instantiated - platform %s' % group.platform)
if group.distribution:
module.debug('Group instantiated - distribution %s' % group.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = group.name
result['state'] = group.state
if group.state == 'absent':
if group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_del()
if rc != 0:
module.fail_json(name=group.name, msg=err)
elif group.state == 'present':
if not group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_add(gid=group.gid, system=group.system)
else:
(rc, out, err) = group.group_mod(gid=group.gid)
if rc is not None and rc != 0:
module.fail_json(name=group.name, msg=err)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if group.group_exists():
info = group.group_info()
result['system'] = group.system
result['gid'] = info[2]
module.exit_json(**result)
if __name__ == '__main__':
main()
| indrajitr/ansible | lib/ansible/modules/group.py | Python | gpl-3.0 | 19,765 |
# -*- coding: utf-8 -*-
#
# test_transfer.py
#
# Copyright (C) 2012 Bro <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from twisted.trial import unittest
from deluge.transfer import DelugeTransferProtocol
import base64
import deluge.rencode as rencode
class TransferTestClass(DelugeTransferProtocol):
def __init__(self):
DelugeTransferProtocol.__init__(self)
self.transport = self
self.messages_out = []
self.messages_in = []
self.packet_count = 0
def write(self, message):
"""
        Called by the DelugeTransferProtocol class.
        This simulates the write method of self.transport in DelugeTransferProtocol.
"""
self.messages_out.append(message)
def message_received(self, message):
"""
        This method overrides message_received in DelugeTransferProtocol and is
        called with the complete message as it was sent by DelugeRPCProtocol.
"""
self.messages_in.append(message)
def get_messages_out_joined(self):
return b"".join(self.messages_out)
def get_messages_in(self):
return self.messages_in
def dataReceived_old_protocol(self, data):
"""
        This is the original method logic (as close as possible) for handling data reception on the client
:param data: a zlib compressed string encoded with rencode.
"""
from datetime import timedelta
import zlib
print "\n=== New Data Received ===\nBytes received:", len(data)
if self._buffer:
# We have some data from the last dataReceived() so lets prepend it
print "Current buffer:", len(self._buffer) if self._buffer else "0"
data = self._buffer + data
self._buffer = None
self.packet_count += 1
self._bytes_received += len(data)
while data:
print "\n-- Handle packet data --"
print "Bytes received:", self._bytes_received
print "Current data:", len(data)
if self._message_length == 0:
# handle_new_message uses _buffer so set data to _buffer.
self._buffer = data
self._handle_new_message()
data = self._buffer
self._buffer = None
self.packet_count = 1
print "New message of length:", self._message_length
dobj = zlib.decompressobj()
try:
request = rencode.loads(dobj.decompress(data))
print "Successfully loaded message",
print " - Buffer length: %d, data length: %d, unused length: %d" % (len(data), \
len(data) - len(dobj.unused_data), len(dobj.unused_data))
print "Packet count:", self.packet_count
except Exception, e:
#log.debug("Received possible invalid message (%r): %s", data, e)
# This could be cut-off data, so we'll save this in the buffer
# and try to prepend it on the next dataReceived()
self._buffer = data
print "Failed to load buffer (size %d): %s" % (len(self._buffer), str(e))
return
else:
data = dobj.unused_data
self._message_length = 0
self.message_received(request)
class DelugeTransferProtocolTestCase(unittest.TestCase):
def setUp(self):
"""
        The expected messages correspond to the test messages (msg1, msg2) after they've been processed
        by DelugeTransferProtocol.send, which means that they've first been encoded with rencode,
        and then compressed with zlib.
        The expected messages are encoded in base64 so they can easily be included here in the source.
        So before comparing the results with the expected messages, the expected messages must be decoded,
        or the result messages encoded in base64.
"""
self.transfer = TransferTestClass()
self.msg1 = (0, 1, {"key_int": 1242429423}, {"key_str": "some string"}, {"key_bool": True})
self.msg2 = (2, 3, {"key_float": 12424.29423},
{"key_unicode": u"some string"},
{"key_dict_with_tuple": {"key_tuple": (1, 2, 3)}},
{"keylist": [4, "5", 6.7]})
self.msg1_expected_compressed_base64 = "RAAAADF4nDvKwJjenp1aGZ+ZV+Lgxfv9PYRXXFLU"\
"XZyfm6oAZGTmpad3gAST8vNznAEAJhSQ"
self.msg2_expected_compressed_base64 = "RAAAAF14nDvGxJzemZ1aGZ+Wk59Y4uTmpKib3g3i"\
"l+ZlJuenpHYX5+emKhSXFGXmpadPBkmkZCaXxJdn"\
"lmTEl5QW5KRCdIOZhxmBhrUDuTmZxSWHWRpNnRyu"\
"paUBAHYlJxI="
def test_send_one_message(self):
"""
        Send one message and test that it has been sent correctly to the
method 'write' in self.transport.
"""
self.transfer.transfer_message(self.msg1)
# Get the data as sent by DelugeTransferProtocol
messages = self.transfer.get_messages_out_joined()
base64_encoded = base64.b64encode(messages)
self.assertEquals(base64_encoded, self.msg1_expected_compressed_base64)
def test_receive_one_message(self):
"""
Receive one message and test that it has been sent to the
method 'message_received'.
"""
self.transfer.dataReceived(base64.b64decode(self.msg1_expected_compressed_base64))
# Get the data as sent by DelugeTransferProtocol
messages = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(messages))
def test_receive_old_message(self):
"""
Receive an old message (with no header) and verify that the data is discarded.
"""
self.transfer.dataReceived(rencode.dumps(self.msg1))
self.assertEquals(len(self.transfer.get_messages_in()), 0)
self.assertEquals(self.transfer._message_length, 0)
self.assertEquals(len(self.transfer._buffer), 0)
def test_receive_two_concatenated_messages(self):
"""
        This test simply concatenates two messages (as they're sent over the network),
and lets DelugeTransferProtocol receive the data as one string.
"""
two_concatenated = base64.b64decode(self.msg1_expected_compressed_base64) + base64.b64decode(self.msg2_expected_compressed_base64)
self.transfer.dataReceived(two_concatenated)
# Get the data as sent by DelugeTransferProtocol
message1 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message1))
message2 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg2), rencode.dumps(message2))
def test_receive_three_messages_in_parts(self):
"""
        This test concatenates three messages (as they're sent over the network),
and lets DelugeTransferProtocol receive the data in multiple parts.
"""
msg_bytes = base64.b64decode(self.msg1_expected_compressed_base64) + \
base64.b64decode(self.msg2_expected_compressed_base64) + \
base64.b64decode(self.msg1_expected_compressed_base64)
packet_size = 40
one_message_byte_count = len(base64.b64decode(self.msg1_expected_compressed_base64))
two_messages_byte_count = one_message_byte_count + len(base64.b64decode(self.msg2_expected_compressed_base64))
three_messages_byte_count = two_messages_byte_count + len(base64.b64decode(self.msg1_expected_compressed_base64))
for d in self.receive_parts_helper(msg_bytes, packet_size):
bytes_received = self.transfer.get_bytes_recv()
if bytes_received >= three_messages_byte_count:
expected_msgs_received_count = 3
elif bytes_received >= two_messages_byte_count:
expected_msgs_received_count = 2
elif bytes_received >= one_message_byte_count:
expected_msgs_received_count = 1
else:
expected_msgs_received_count = 0
# Verify that the expected number of complete messages has arrived
self.assertEquals(expected_msgs_received_count, len(self.transfer.get_messages_in()))
# Get the data as received by DelugeTransferProtocol
message1 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message1))
message2 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg2), rencode.dumps(message2))
message3 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message3))
# Remove underscore to enable test, or run the test directly:
# tests $ trial test_transfer.DelugeTransferProtocolTestCase._test_rencode_fail_protocol
def _test_rencode_fail_protocol(self):
"""
        This test exercises the old protocol, which relied on errors from rencode to detect incomplete messages.
"""
msg_bytes = base64.b64decode(self.msg1_expected_compressed_base64) + \
base64.b64decode(self.msg2_expected_compressed_base64) + \
base64.b64decode(self.msg1_expected_compressed_base64)
packet_size = 149
one_message_byte_count = len(base64.b64decode(self.msg1_expected_compressed_base64))
two_messages_byte_count = one_message_byte_count + len(base64.b64decode(self.msg2_expected_compressed_base64))
three_messages_byte_count = two_messages_byte_count + len(base64.b64decode(self.msg1_expected_compressed_base64))
print
print "Msg1 size:", len(base64.b64decode(self.msg1_expected_compressed_base64)) - 4
print "Msg2 size:", len(base64.b64decode(self.msg2_expected_compressed_base64)) - 4
print "Msg3 size:", len(base64.b64decode(self.msg1_expected_compressed_base64)) - 4
print "one_message_byte_count:", one_message_byte_count
print "two_messages_byte_count:", two_messages_byte_count
print "three_messages_byte_count:", three_messages_byte_count
for d in self.receive_parts_helper(msg_bytes, packet_size, self.transfer.dataReceived_old_protocol):
bytes_received = self.transfer.get_bytes_recv()
if bytes_received >= three_messages_byte_count:
expected_msgs_received_count = 3
elif bytes_received >= two_messages_byte_count:
expected_msgs_received_count = 2
elif bytes_received >= one_message_byte_count:
expected_msgs_received_count = 1
else:
expected_msgs_received_count = 0
# Verify that the expected number of complete messages has arrived
if expected_msgs_received_count != len(self.transfer.get_messages_in()):
print "Expected number of messages received is %d, but %d have been received."\
% (expected_msgs_received_count, len(self.transfer.get_messages_in()))
# Get the data as received by DelugeTransferProtocol
message1 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message1))
message2 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg2), rencode.dumps(message2))
message3 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message3))
def test_receive_middle_of_header(self):
"""
        This test concatenates two messages (as they're sent over the network),
and lets DelugeTransferProtocol receive the data in two parts.
The first part contains the first message, plus two bytes of the next message.
The next part contains the rest of the message.
This is a special case, as DelugeTransferProtocol can't start parsing
a message until it has at least 4 bytes (the size of the header) to be able
to read and parse the size of the payload.
"""
two_concatenated = base64.b64decode(self.msg1_expected_compressed_base64) + base64.b64decode(self.msg2_expected_compressed_base64)
first_len = len(base64.b64decode(self.msg1_expected_compressed_base64))
        # Now feed the entire first message, plus half the header of the next message (2 bytes into the header)
self.transfer.dataReceived(two_concatenated[:first_len+2])
# Should be 1 message in the list
self.assertEquals(1, len(self.transfer.get_messages_in()))
# Send the rest
self.transfer.dataReceived(two_concatenated[first_len+2:])
# Should be 2 messages in the list
self.assertEquals(2, len(self.transfer.get_messages_in()))
# Get the data as sent by DelugeTransferProtocol
message1 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg1), rencode.dumps(message1))
message2 = self.transfer.get_messages_in().pop(0)
self.assertEquals(rencode.dumps(self.msg2), rencode.dumps(message2))
    # Needs a file containing a big data structure, e.g. the torrent list as it is transferred by the daemon
#def test_simulate_big_transfer(self):
# filename = "../deluge.torrentlist"
#
# f = open(filename, "r")
# data = f.read()
# message_to_send = eval(data)
# self.transfer.transfer_message(message_to_send)
#
# # Get the data as sent to the network by DelugeTransferProtocol
# compressed_data = self.transfer.get_messages_out_joined()
# packet_size = 16000 # Or something smaller...
#
# for d in self.receive_parts_helper(compressed_data, packet_size):
# bytes_recv = self.transfer.get_bytes_recv()
# if bytes_recv < len(compressed_data):
# self.assertEquals(len(self.transfer.get_messages_in()), 0)
# else:
# self.assertEquals(len(self.transfer.get_messages_in()), 1)
# # Get the data as received by DelugeTransferProtocol
# transfered_message = self.transfer.get_messages_in().pop(0)
# # Test that the data structures are equal
# #self.assertEquals(transfered_message, message_to_send)
# #self.assertTrue(transfered_message == message_to_send)
#
# #f.close()
# #f = open("rencode.torrentlist", "w")
# #f.write(str(transfered_message))
# #f.close()
    def receive_parts_helper(self, data, packet_size, receive_func=None):
        """Feed 'data' to the protocol in chunks of 'packet_size' bytes,
        yielding after each chunk so callers can make assertions mid-stream."""
byte_count = len(data)
sent_bytes = 0
while byte_count > 0:
to_receive = packet_size if byte_count > packet_size else byte_count
sent_bytes += to_receive
byte_count -= to_receive
if receive_func:
receive_func(data[:to_receive])
else:
self.transfer.dataReceived(data[:to_receive])
data = data[to_receive:]
yield
| voltaicsca/deluge | tests/test_transfer.py | Python | gpl-3.0 | 16,607 |
from openerp.osv import osv, fields
class attributes(osv.Model):
_name = "product.attribute"
def _get_float_max(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
cr.execute("""
SELECT attribute_id, MAX(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_float_min(self, cr, uid, ids, field_name, arg, context=None):
result = dict.fromkeys(ids, 0)
if ids:
cr.execute("""
SELECT attribute_id, MIN(value)
FROM product_attribute_line
WHERE attribute_id in (%s)
GROUP BY attribute_id
""" % ",".join(map(str, ids)))
result.update(dict(cr.fetchall()))
return result
def _get_min_max(self, cr, uid, ids, context=None):
result = {}
for value in self.pool.get('product.attribute.line').browse(cr, uid, ids, context=context):
if value.type == 'float':
result[value.attribute_id.id] = True
return result.keys()
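    # Note (editor): in the fields.function 'store' triggers below, the dict
    # maps a trigger model to (ids_resolver, watched_fields, priority);
    # _get_min_max receives the changed product.attribute.line ids and returns
    # the product.attribute ids whose float_min/float_max must be recomputed.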
_columns = {
'name': fields.char('Name', translate=True, required=True),
'type': fields.selection([('distinct', 'Textual Value'), ('float', 'Numeric Value')], "Type", required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values'),
'attr_product_ids': fields.one2many('product.attribute.line', 'attribute_id', 'Products'),
'float_max': fields.function(_get_float_max, type='float', string="Max", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'float_min': fields.function(_get_float_min, type='float', string="Min", store={
'product.attribute.line': (_get_min_max, ['value','attribute_id'], 20),
}),
'visible': fields.boolean('Display Filter on Website'),
}
_defaults = {
'type': 'distinct',
'visible': True,
}
class attributes_value(osv.Model):
_name = "product.attribute.value"
_columns = {
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'atr_product_ids': fields.one2many('product.attribute.line', 'value_id', 'Products'),
}
class attributes_product(osv.Model):
_name = "product.attribute.line"
_order = 'attribute_id, value_id, value'
_columns = {
'value': fields.float('Numeric Value'),
'value_id': fields.many2one('product.attribute.value', 'Textual Value'),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'product_tmpl_id': fields.many2one('product.template', 'Product', required=True),
'type': fields.related('attribute_id', 'type', type='selection',
selection=[('distinct', 'Distinct'), ('float', 'Float')], string='Type'),
}
def onchange_attribute_id(self, cr, uid, ids, attribute_id, context=None):
attribute = self.pool.get('product.attribute').browse(cr, uid, attribute_id, context=context)
return {'value': {'type': attribute.type, 'value_id': False, 'value': ''}}
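    # Note (editor): the {'value': {...}} dict returned by an onchange handler
    # is applied to the form fields client-side, so switching the attribute
    # resets both the textual and the numeric value.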
class product_template(osv.Model):
_inherit = "product.template"
_columns = {
'attribute_lines': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product attributes'),
}
| ovnicraft/openerp-restaurant | website_sale/models/product_characteristics.py | Python | agpl-3.0 | 3,642 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('xblock_django.tests.test_user_service', 'common.djangoapps.xblock_django.tests.test_user_service')
from common.djangoapps.xblock_django.tests.test_user_service import *
| eduNEXT/edunext-platform | import_shims/studio/xblock_django/tests/test_user_service.py | Python | agpl-3.0 | 440 |
'''Test cases for QImage'''
import unittest
import py3kcompat as py3k
from PySide.QtGui import *
from helper import UsesQApplication, adjust_filename
xpm = [
"27 22 206 2",
" c None",
". c #FEFEFE",
"+ c #FFFFFF",
"@ c #F9F9F9",
"# c #ECECEC",
"$ c #D5D5D5",
"% c #A0A0A0",
"& c #767676",
"* c #525252",
"= c #484848",
"- c #4E4E4E",
"; c #555555",
"> c #545454",
", c #5A5A5A",
"' c #4B4B4B",
") c #4A4A4A",
"! c #4F4F4F",
"~ c #585858",
"{ c #515151",
"] c #4C4C4C",
"^ c #B1B1B1",
"/ c #FCFCFC",
"( c #FDFDFD",
"_ c #C1C1C1",
": c #848484",
"< c #616161",
"[ c #5E5E5E",
"} c #CECECE",
"| c #E2E2E2",
"1 c #E4E4E4",
"2 c #DFDFDF",
"3 c #D2D2D2",
"4 c #D8D8D8",
"5 c #D4D4D4",
"6 c #E6E6E6",
"7 c #F1F1F1",
"8 c #838383",
"9 c #8E8E8E",
"0 c #8F8F8F",
"a c #CBCBCB",
"b c #CCCCCC",
"c c #E9E9E9",
"d c #F2F2F2",
"e c #EDEDED",
"f c #B5B5B5",
"g c #A6A6A6",
"h c #ABABAB",
"i c #BBBBBB",
"j c #B0B0B0",
"k c #EAEAEA",
"l c #6C6C6C",
"m c #BCBCBC",
"n c #F5F5F5",
"o c #FAFAFA",
"p c #B6B6B6",
"q c #F3F3F3",
"r c #CFCFCF",
"s c #FBFBFB",
"t c #CDCDCD",
"u c #DDDDDD",
"v c #999999",
"w c #F0F0F0",
"x c #2B2B2B",
"y c #C3C3C3",
"z c #A4A4A4",
"A c #D7D7D7",
"B c #E7E7E7",
"C c #6E6E6E",
"D c #9D9D9D",
"E c #BABABA",
"F c #AEAEAE",
"G c #898989",
"H c #646464",
"I c #BDBDBD",
"J c #CACACA",
"K c #2A2A2A",
"L c #212121",
"M c #B7B7B7",
"N c #F4F4F4",
"O c #737373",
"P c #828282",
"Q c #4D4D4D",
"R c #000000",
"S c #151515",
"T c #B2B2B2",
"U c #D6D6D6",
"V c #D3D3D3",
"W c #2F2F2F",
"X c #636363",
"Y c #A1A1A1",
"Z c #BFBFBF",
"` c #E0E0E0",
" . c #6A6A6A",
".. c #050505",
"+. c #A3A3A3",
"@. c #202020",
"#. c #5F5F5F",
"$. c #B9B9B9",
"%. c #C7C7C7",
"&. c #D0D0D0",
"*. c #3E3E3E",
"=. c #666666",
"-. c #DBDBDB",
";. c #424242",
">. c #C2C2C2",
",. c #1A1A1A",
"'. c #2C2C2C",
"). c #F6F6F6",
"!. c #AAAAAA",
"~. c #DCDCDC",
"{. c #2D2D2D",
"]. c #2E2E2E",
"^. c #A7A7A7",
"/. c #656565",
"(. c #333333",
"_. c #464646",
":. c #C4C4C4",
"<. c #B8B8B8",
"[. c #292929",
"}. c #979797",
"|. c #EFEFEF",
"1. c #909090",
"2. c #8A8A8A",
"3. c #575757",
"4. c #676767",
"5. c #C5C5C5",
"6. c #7A7A7A",
"7. c #797979",
"8. c #989898",
"9. c #EEEEEE",
"0. c #707070",
"a. c #C8C8C8",
"b. c #111111",
"c. c #AFAFAF",
"d. c #474747",
"e. c #565656",
"f. c #E3E3E3",
"g. c #494949",
"h. c #5B5B5B",
"i. c #222222",
"j. c #353535",
"k. c #D9D9D9",
"l. c #0A0A0A",
"m. c #858585",
"n. c #E5E5E5",
"o. c #0E0E0E",
"p. c #9A9A9A",
"q. c #6F6F6F",
"r. c #868686",
"s. c #060606",
"t. c #1E1E1E",
"u. c #E8E8E8",
"v. c #A5A5A5",
"w. c #0D0D0D",
"x. c #030303",
"y. c #272727",
"z. c #131313",
"A. c #1F1F1F",
"B. c #757575",
"C. c #F7F7F7",
"D. c #414141",
"E. c #080808",
"F. c #6B6B6B",
"G. c #313131",
"H. c #C0C0C0",
"I. c #C9C9C9",
"J. c #0B0B0B",
"K. c #232323",
"L. c #434343",
"M. c #3D3D3D",
"N. c #282828",
"O. c #7C7C7C",
"P. c #252525",
"Q. c #3A3A3A",
"R. c #F8F8F8",
"S. c #1B1B1B",
"T. c #949494",
"U. c #3B3B3B",
"V. c #242424",
"W. c #383838",
"X. c #6D6D6D",
"Y. c #818181",
"Z. c #939393",
"`. c #9E9E9E",
" + c #929292",
".+ c #7D7D7D",
"++ c #ADADAD",
"@+ c #DADADA",
"#+ c #919191",
"$+ c #E1E1E1",
"%+ c #BEBEBE",
"&+ c #ACACAC",
"*+ c #9C9C9C",
"=+ c #B3B3B3",
"-+ c #808080",
";+ c #A8A8A8",
">+ c #393939",
",+ c #747474",
"'+ c #7F7F7F",
")+ c #D1D1D1",
"!+ c #606060",
"~+ c #5C5C5C",
"{+ c #686868",
"]+ c #7E7E7E",
"^+ c #787878",
"/+ c #595959",
". . . + @ # $ % & * = - ; > , ' ) ! ~ { ] ^ / . . + + ",
". ( + _ : < [ & } | 1 2 $ 3 4 5 3 6 7 + + 8 9 + . + . ",
". + 0 9 a ( 3 a b c d e c f g h i g j $ k + l m + . + ",
"+ 2 8 n o p | ( q r s . # t + + + u ^ v e w + x + + + ",
"+ y z . @ A k B 7 n + ( s | p 8 C D 2 E 4 + + F G + . ",
"# H I $ J G K L - M N . 2 O P Q R R S T U s s V W j + ",
"X Y Z @ o ` _ g ...+.( 4 @.#.m G $.%.7 &.X *.=.-.;.&.",
"Q >.C ,.'.} e + ).!.k + . + + . ~.{.> ].x f 7 ^./.k (.",
"_.:.4 @ <.[.}.|.1.2.+ + + >.} 4 B + ( @ _ 3.4.5.6.r 7.",
"3.8.9.~ 0.+ a.Q b.+ + c.d.#.=.$ |.b #.e.z ^ ; ^. .f.g.",
"-.h.+ i.S M + # p j.% n 9.5.k.H l.m.V ^.n.o.M + M p.q.",
"7 r.N s.1.R t.<.|.| u.v.~ w.x.E + s y.z.A.B.C.+ 5 D.q ",
").p.2 E.0.9 F.%.O {._ @.+ + i { [ i.G.H.P I.+ s q.} + ",
").p.6 J.R b.K.L.M.A.! b.g.K [.R M k + N.I + + >.O.+ . ",
").8.9.N.P...R R R R E.t.W n.+ Q.R.6 @.| + . + S.+ + . ",
"n }.w T.U.B.<.i.@ Y + + U.+ c u V.= B B 7 u.W.c + . + ",
"N T.# + }.X.Y.,.8.F.8 Z.[.`. +.+}.4 ++@+O.< ~.+ ( . + ",
"d #+1 + _ ~.u.$+b $.y @+| $+%+I.&+k.h W +.9.+ ( . + . ",
"w 0 |.*+. >.<.=+++++p a.p -+;+5.k.>+,+@ + . . + . + + ",
"q '+9.R.^ I.t b %.I.)+4 $+n.I.,+ .|.+ . . . + . + + + ",
". p !+( + + + + + + E 0. .-+8.f.+ + . . + + . + + + + ",
". ( A ~+{+]+^+l > /+D f.c q . + . . + + . + + + + + + "
]
class QImageTest(UsesQApplication):
'''Test case for calling setPixel with float as argument'''
def testQImageStringBuffer(self):
'''Test if the QImage signatures receiving string buffers exist.'''
img0 = QImage(adjust_filename('sample.png', __file__))
# btw let's test the bits() method
img1 = QImage(img0.bits(), img0.width(), img0.height(), img0.format())
self.assertEqual(img0, img1)
img2 = QImage(img0.bits(), img0.width(), img0.height(), img0.bytesPerLine(), img0.format())
self.assertEqual(img0, img2)
## test scanLine method
data1 = img0.scanLine(0)
data2 = img1.scanLine(0)
self.assertEqual(data1, data2)
# PySide python 3.x does not support slice yet
if not py3k.IS_PY3K:
buff = py3k.buffer(img0.bits()[:img0.bytesPerLine()])
self.assertEqual(data1, buff)
self.assertEqual(data2, buff)
def testEmptyBuffer(self):
img = QImage(py3k.buffer(''), 100, 100, QImage.Format_ARGB32)
def testEmptyStringAsBuffer(self):
img = QImage(py3k.b(''), 100, 100, QImage.Format_ARGB32)
def testXpmConstructor(self):
label = QLabel()
img = QImage(xpm)
self.assertFalse(img.isNull())
self.assertEqual(img.width(), 27)
self.assertEqual(img.height(), 22)
if __name__ == '__main__':
unittest.main()
| enthought/pyside | tests/QtGui/qimage_test.py | Python | lgpl-2.1 | 7,077 |
from lib.actions import BaseAction
class ApprovalAction(BaseAction):
    def run(self, number):
        s = self.client
        s.table = 'change_request'
        # Resolve the change request's sys_id from its human-readable number,
        # then mark the record as approved.
        res = s.get({'number': number})
        sys_id = res[0]['sys_id']
        response = s.update({'approval': 'approved'}, sys_id)
        return response
| armab/st2contrib | packs/servicenow/actions/approve_change.py | Python | apache-2.0 | 317 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import test
class ExampleSkipTestCase(test.TestCase):
test_counter = 0
@test.skip_test("Example usage of @test.skip_test()")
def test_skip_test_example(self):
self.fail("skip_test failed to work properly.")
@test.skip_if(True, "Example usage of @test.skip_if()")
def test_skip_if_example(self):
self.fail("skip_if failed to work properly.")
@test.skip_unless(False, "Example usage of @test.skip_unless()")
def test_skip_unless_example(self):
self.fail("skip_unless failed to work properly.")
@test.skip_if(False, "This test case should never be skipped.")
def test_001_increase_test_counter(self):
ExampleSkipTestCase.test_counter += 1
@test.skip_unless(True, "This test case should never be skipped.")
def test_002_increase_test_counter(self):
ExampleSkipTestCase.test_counter += 1
def test_003_verify_test_counter(self):
self.assertEquals(ExampleSkipTestCase.test_counter, 2,
"Tests were not skipped appropriately")
| tylertian/Openstack | openstack F/cinder/cinder/tests/test_skip_examples.py | Python | apache-2.0 | 1,837 |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.basic.flow_action import (
FlowAction
)
class OFPFlowActionPopMpls(FlowAction):
MPLS_UNICAST = 0x8847
MPLS_MULTICAST = 0x8848
# property key
ETH_TYPE = "eth_type"
def __init__(self, type_, eth_type):
super(OFPFlowActionPopMpls, self).__init__(type_)
self._body[self.ETH_TYPE] = eth_type
@property
def eth_type(self):
return self._body[self.ETH_TYPE]
@classmethod
def create_from_packed(cls, packed):
return cls(packed[cls.TYPE], packed[cls.ETH_TYPE])
def packed_object(self):
return self._body
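# Illustrative usage (editor's sketch; the exact 'type_' string expected by
# the FlowAction base class is an assumption here):
#
#   action = OFPFlowActionPopMpls("OFPFlowActionPopMpls", 0x0800)
#   action.eth_type        # -> 0x0800, the ethertype exposed after the pop
#   action.packed_object() # -> the underlying body dict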
| haizawa/odenos | src/main/python/org/o3project/odenos/core/component/network/flow/ofpflow/ofp_flow_action_pop_mpls.py | Python | apache-2.0 | 1,666 |
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found; going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found; going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
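# Illustrative note (editor): a wrapped script whose stdout is
#   EXPECTED_OUTPUT_TOMCAT_PID=1234
#   done
# parses to {'last_output': 'done', 'outputs': {'TOMCAT_PID': '1234'}};
# any line not matching EXPECTED_OUTPUT_<NAME>=<value> becomes last_output.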
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Ouput name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
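# Note (editor): stdout and stderr are drained on daemon threads so that a
# chatty script cannot fill the OS pipe buffers and deadlock the poll loop
# in execute() above.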
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['TOMCAT_HOME'] = r'/opt/tomcat'
env_map['TOMCAT_PORT'] = r'80'
env_map['TOMCAT_URL'] = r'http://mirrors.ircam.fr/pub/apache/tomcat/tomcat-8/v8.0.29/bin/apache-tomcat-8.0.29.tar.gz'
new_script_process = {'env': env_map}
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/tomcat-war-types/scripts/tomcat_install.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v
ctx.instance.runtime_properties['server_url'] = r'http://' + get_attribute(ctx, 'public_ip_address') + r':' + r'80'
ctx.instance.update()
| victorkeophila/alien4cloud-cloudify3-provider | src/test/resources/outputs/blueprints/openstack/tomcat/wrapper/Tomcat/tosca.interfaces.node.lifecycle.Standard/create/artifacts/tomcat-war-types/scripts/_a4c_tomcat_install.py | Python | apache-2.0 | 14,972 |
#!/usr/bin/env python
import errno
import os
import re
import tempfile
from hashlib import md5
class _FileCacheError(Exception):
"""Base exception class for FileCache related errors"""
class _FileCache(object):
DEPTH = 3
def __init__(self, root_directory=None):
self._InitializeRootDirectory(root_directory)
def Get(self, key):
path = self._GetPath(key)
if os.path.exists(path):
with open(path) as f:
return f.read()
else:
return None
def Set(self, key, data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self, key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory ))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
"""Attempt to find the username in a cross-platform fashion."""
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError):
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
try:
os.mkdir(root_directory)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(root_directory):
# directory already exists
pass
else:
# exists but is a file, or no permissions, or...
raise
self._root_directory = root_directory
def _GetPath(self, key):
try:
hashed_key = md5(key.encode('utf-8')).hexdigest()
        except TypeError:
            # `key` is already a byte string under Python 2; hashlib's md5()
            # is called directly (it has no `.new` constructor).
            hashed_key = md5(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self, hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
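# Minimal usage sketch (illustrative; assumes a writable temp directory).
# Keys are md5-hashed and sharded DEPTH directory levels deep, e.g.
# <root>/a/b/c/<full_hash> for DEPTH == 3:
#
#   cache = _FileCache()
#   cache.Set('timeline', 'cached-json')
#   assert cache.Get('timeline') == 'cached-json'
#   cache.Remove('timeline')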
class ParseTweet(object):
# compile once on import
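    # NOTE: "[http://]?" is a character class (at most one of h, t, p, :, /),
    # so the URL pattern below is loose rather than a strict scheme match.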
regexp = {"RT": "^RT", "MT": r"^MT", "ALNUM": r"(@[a-zA-Z0-9_]+)",
"HASHTAG": r"(#[\w\d]+)", "URL": r"([http://]?[a-zA-Z\d\/]+[\.]+[a-zA-Z\d\/\.]+)"}
regexp = dict((key, re.compile(value)) for key, value in list(regexp.items()))
def __init__(self, timeline_owner, tweet):
""" timeline_owner : twitter handle of user account. tweet - 140 chars from feed; object does all computation on construction
properties:
RT, MT - boolean
URLs - list of URL
Hashtags - list of tags
"""
self.Owner = timeline_owner
self.tweet = tweet
self.UserHandles = ParseTweet.getUserHandles(tweet)
self.Hashtags = ParseTweet.getHashtags(tweet)
self.URLs = ParseTweet.getURLs(tweet)
self.RT = ParseTweet.getAttributeRT(tweet)
self.MT = ParseTweet.getAttributeMT(tweet)
# additional intelligence
        if self.RT and len(self.UserHandles) > 0:  # change the owner of tweet?
            self.Owner = self.UserHandles[0]
def __str__(self):
""" for display method """
return "owner %s, urls: %d, hashtags %d, user_handles %d, len_tweet %d, RT = %s, MT = %s" % (
self.Owner, len(self.URLs), len(self.Hashtags), len(self.UserHandles),
len(self.tweet), self.RT, self.MT)
@staticmethod
def getAttributeRT(tweet):
""" see if tweet is a RT """
return re.search(ParseTweet.regexp["RT"], tweet.strip()) is not None
@staticmethod
def getAttributeMT(tweet):
""" see if tweet is a MT """
return re.search(ParseTweet.regexp["MT"], tweet.strip()) is not None
@staticmethod
def getUserHandles(tweet):
""" given a tweet we try and extract all user handles in order of occurrence"""
return re.findall(ParseTweet.regexp["ALNUM"], tweet)
@staticmethod
def getHashtags(tweet):
""" return all hashtags"""
return re.findall(ParseTweet.regexp["HASHTAG"], tweet)
@staticmethod
def getURLs(tweet):
""" URL : [http://]?[\w\.?/]+"""
return re.findall(ParseTweet.regexp["URL"], tweet)
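# Illustrative example (not from the original module):
#   p = ParseTweet('alice', 'RT @bob nice work on #python')
#   p.RT -> True, p.Owner -> '@bob'
#   p.Hashtags -> ['#python'], p.UserHandles -> ['@bob']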
| milmd90/TwitterBot | twitter/_file_cache.py | Python | apache-2.0 | 5,588 |
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
# SNMPv3 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc3414.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
class UsmSecurityParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
namedtype.NamedType('msgAuthoritativeEngineBoots',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgAuthoritativeEngineTime',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgUserName',
univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
)
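# Usage sketch (illustrative): populate the SEQUENCE by component name and
# BER-encode it with pyasn1's codec.
#
#   from pyasn1.codec.ber import encoder
#   params = UsmSecurityParameters()
#   params.setComponentByName('msgAuthoritativeEngineID', 'engine-id')
#   params.setComponentByName('msgAuthoritativeEngineBoots', 1)
#   params.setComponentByName('msgAuthoritativeEngineTime', 0)
#   params.setComponentByName('msgUserName', 'usr-md5-none')
#   params.setComponentByName('msgAuthenticationParameters', '')
#   params.setComponentByName('msgPrivacyParameters', '')
#   substrate = encoder.encode(params)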
| catapult-project/catapult | third_party/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3414.py | Python | bsd-3-clause | 1,161 |
# Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats)
import tarfile
import warnings
from test import test_support
from test.test_support import TESTFN, check_warnings, captured_stdout
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 400, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState == 0:
if func is os.remove:
self.assertEqual(arg, self.childpath)
else:
self.assertIs(func, os.listdir,
"func must be either os.remove or os.listdir")
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.dirname(dst_dir)
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_data(join(src_dir, 'test.txt'), '123')
write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
                            os.path.basename(path) == 'subdir'):
res.append(name)
                        elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
if hasattr(os, "symlink"):
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
f = open(src, 'w')
f.write('cheddar')
f.close()
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
try:
shutil.rmtree(TESTFN)
except OSError:
pass
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
        # skipUnless() only builds a decorator; call skipTest() directly to
        # actually skip when the two directories are on different drives.
        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
            self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
        # check if the zipfile was created
        zipfile_name = base_name + '.zip'
        self.assertTrue(os.path.exists(zipfile_name))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
        # this works even if there's no gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write("spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
if os.path.exists(dst_dir):
os.rmdir(dst_dir)
def test_main():
test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
test_main()
| ktan2020/legacy-automation | win/Lib/test/test_shutil.py | Python | mit | 30,473 |
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py 2014/07/05 09:42:21 garyo"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
def Alias(self, name, **kw):
if isinstance(name, SCons.Node.Alias.Alias):
return name
try:
a = self[name]
except KeyError:
a = SCons.Node.Alias.Alias(name, **kw)
self[name] = a
return a
def lookup(self, name, **kw):
try:
return self[name]
except KeyError:
return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Alias(SCons.Node.Node):
NodeInfo = AliasNodeInfo
BuildInfo = AliasBuildInfo
def __init__(self, name):
SCons.Node.Node.__init__(self)
self.name = name
def str_for_display(self):
return '"' + self.__str__() + '"'
def __str__(self):
return self.name
def make_ready(self):
self.get_csig()
really_build = SCons.Node.Node.build
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Alias nodes get built regardless of
# what directory scons was run from. Alias nodes
# are outside the filesystem:
return 1
def get_contents(self):
"""The contents of an alias is the concatenation
of the content signatures of all its sources."""
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs)
def sconsign(self):
"""An Alias is not recorded in .sconsign files"""
pass
#
#
#
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def build(self):
"""A "builder" for aliases."""
pass
def convert(self):
try: del self.builder
except AttributeError: pass
self.reset_executor()
self.build = self.really_build
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
csig = SCons.Util.MD5signature(contents)
self.get_ninfo().csig = csig
return csig
default_ans = AliasNameSpace()
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
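# Usage sketch (illustrative): default_ans.Alias('install') returns the shared
# Alias node named 'install', creating it on first use; its content signature
# is the concatenation of its children's csigs (see get_contents() above).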
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| dezelin/scons | scons-local/SCons/Node/Alias.py | Python | mit | 4,197 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import calendar
import datetime
import jwt
import mock
from twisted.cred import strcred
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.internet import defer
from twisted.trial import unittest
from twisted.web._auth.wrapper import HTTPAuthSessionWrapper
from twisted.web.server import Request
from buildbot.test.unit import test_www_hooks_base
from buildbot.test.util import www
from buildbot.www import auth
from buildbot.www import change_hook
from buildbot.www import resource
from buildbot.www import rest
from buildbot.www import service
class NeedsReconfigResource(resource.Resource):
needsReconfig = True
reconfigs = 0
def reconfigResource(self, config):
NeedsReconfigResource.reconfigs += 1
class Test(www.WwwTestMixin, unittest.TestCase):
def setUp(self):
self.master = self.make_master(url='h:/a/b/')
self.svc = self.master.www = service.WWWService()
self.svc.setServiceParent(self.master)
def makeConfig(self, **kwargs):
w = dict(port=None, auth=auth.NoAuth(), logfileName='l')
w.update(kwargs)
new_config = mock.Mock()
new_config.www = w
new_config.buildbotURL = 'h:/'
self.master.config = new_config
return new_config
def test_reconfigService_no_port(self):
new_config = self.makeConfig()
d = self.svc.reconfigServiceWithBuildbotConfig(new_config)
@d.addCallback
def check(_):
self.assertEqual(self.svc.site, None)
return d
@defer.inlineCallbacks
def test_reconfigService_reconfigResources(self):
new_config = self.makeConfig(port=8080)
self.patch(rest, 'RestRootResource', NeedsReconfigResource)
NeedsReconfigResource.reconfigs = 0
# first time, reconfigResource gets called along with setupSite
yield self.svc.reconfigServiceWithBuildbotConfig(new_config)
self.assertEqual(NeedsReconfigResource.reconfigs, 1)
# and the next time, setupSite isn't called, but reconfigResource is
yield self.svc.reconfigServiceWithBuildbotConfig(new_config)
self.assertEqual(NeedsReconfigResource.reconfigs, 2)
def test_reconfigService_port(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigServiceWithBuildbotConfig(new_config)
@d.addCallback
def check(_):
self.assertNotEqual(self.svc.site, None)
self.assertNotEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, 20)
return d
def test_reconfigService_expiration_time(self):
new_config = self.makeConfig(port=80, cookie_expiration_time=datetime.timedelta(minutes=1))
d = self.svc.reconfigServiceWithBuildbotConfig(new_config)
@d.addCallback
def check(_):
self.assertNotEqual(self.svc.site, None)
self.assertNotEqual(self.svc.port_service, None)
self.assertEqual(service.BuildbotSession.expDelay, datetime.timedelta(minutes=1))
return d
def test_reconfigService_port_changes(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigServiceWithBuildbotConfig(new_config)
@d.addCallback
def reconfig(_):
newer_config = self.makeConfig(port=999)
return self.svc.reconfigServiceWithBuildbotConfig(newer_config)
@d.addCallback
def check(_):
self.assertNotEqual(self.svc.site, None)
self.assertNotEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, 999)
return d
def test_reconfigService_port_changes_to_none(self):
new_config = self.makeConfig(port=20)
d = self.svc.reconfigServiceWithBuildbotConfig(new_config)
@d.addCallback
def reconfig(_):
newer_config = self.makeConfig()
return self.svc.reconfigServiceWithBuildbotConfig(newer_config)
@d.addCallback
def check(_):
# (note the site sticks around)
self.assertEqual(self.svc.port_service, None)
self.assertEqual(self.svc.port, None)
return d
def test_setupSite(self):
self.svc.setupSite(self.makeConfig())
site = self.svc.site
# check that it has the right kind of resources attached to its
# root
root = site.resource
req = mock.Mock()
self.assertIsInstance(root.getChildWithDefault(b'api', req),
rest.RestRootResource)
def test_setupSiteWithProtectedHook(self):
checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("guest", "password")
self.svc.setupSite(self.makeConfig(
change_hook_dialects={'base': True},
change_hook_auth=[checker]))
site = self.svc.site
# check that it has the right kind of resources attached to its
# root
root = site.resource
req = mock.Mock()
self.assertIsInstance(root.getChildWithDefault(b'change_hook', req),
HTTPAuthSessionWrapper)
@defer.inlineCallbacks
def test_setupSiteWithHook(self):
new_config = self.makeConfig(
change_hook_dialects={'base': True})
self.svc.setupSite(new_config)
site = self.svc.site
# check that it has the right kind of resources attached to its
# root
root = site.resource
req = mock.Mock()
ep = root.getChildWithDefault(b'change_hook', req)
self.assertIsInstance(ep,
change_hook.ChangeHookResource)
# not yet configured
self.assertEqual(ep.dialects, {})
yield self.svc.reconfigServiceWithBuildbotConfig(new_config)
# now configured
self.assertEqual(ep.dialects, {'base': True})
rsrc = self.svc.site.resource.getChildWithDefault(b'change_hook', mock.Mock())
path = b'/change_hook/base'
request = test_www_hooks_base._prepare_request({})
self.master.addChange = mock.Mock()
yield self.render_resource(rsrc, path, request=request)
self.master.addChange.assert_called()
@defer.inlineCallbacks
def test_setupSiteWithHookAndAuth(self):
fn = self.mktemp()
with open(fn, 'w') as f:
f.write("user:pass")
new_config = self.makeConfig(
port=8080,
plugins={},
change_hook_dialects={'base': True},
change_hook_auth=[strcred.makeChecker("file:" + fn)])
self.svc.setupSite(new_config)
yield self.svc.reconfigServiceWithBuildbotConfig(new_config)
rsrc = self.svc.site.resource.getChildWithDefault(b'', mock.Mock())
res = yield self.render_resource(rsrc, b'')
self.assertIn(b'{"type": "file"}', res)
rsrc = self.svc.site.resource.getChildWithDefault(
b'change_hook', mock.Mock())
res = yield self.render_resource(rsrc, b'/change_hook/base')
# as UnauthorizedResource is in private namespace, we cannot use
# assertIsInstance :-(
self.assertIn('UnauthorizedResource', repr(res))
class TestBuildbotSite(unittest.SynchronousTestCase):
SECRET = 'secret'
def setUp(self):
self.site = service.BuildbotSite(None, "logs", 0, 0)
self.site.setSessionSecret(self.SECRET)
def test_getSession_from_bad_jwt(self):
""" if the cookie is bad (maybe from previous version of buildbot),
then we should raise KeyError for consumption by caller,
and log the JWT error
"""
self.assertRaises(KeyError, self.site.getSession, "xxx")
self.flushLoggedErrors(jwt.exceptions.DecodeError)
def test_getSession_from_correct_jwt(self):
payload = {'user_info': {'some': 'payload'}}
uid = jwt.encode(payload, self.SECRET, algorithm=service.SESSION_SECRET_ALGORITHM)
session = self.site.getSession(uid)
self.assertEqual(session.user_info, {'some': 'payload'})
def test_getSession_from_expired_jwt(self):
# expired one week ago
exp = datetime.datetime.utcnow() - datetime.timedelta(weeks=1)
exp = calendar.timegm(datetime.datetime.timetuple(exp))
payload = {'user_info': {'some': 'payload'}, 'exp': exp}
uid = jwt.encode(payload, self.SECRET, algorithm=service.SESSION_SECRET_ALGORITHM)
self.assertRaises(KeyError, self.site.getSession, uid)
def test_getSession_with_no_user_info(self):
payload = {'foo': 'bar'}
uid = jwt.encode(payload, self.SECRET, algorithm=service.SESSION_SECRET_ALGORITHM)
self.assertRaises(KeyError, self.site.getSession, uid)
def test_makeSession(self):
session = self.site.makeSession()
self.assertEqual(session.user_info, {'anonymous': True})
def test_updateSession(self):
session = self.site.makeSession()
class FakeChannel(object):
transport = None
def isSecure(self):
return False
request = Request(FakeChannel(), False)
request.sitepath = [b"bb"]
session.updateSession(request)
self.assertEqual(len(request.cookies), 1)
name, value = request.cookies[0].split(b";")[0].split(b"=")
        decoded = jwt.decode(value, self.SECRET,
                             algorithms=[service.SESSION_SECRET_ALGORITHM])
self.assertEqual(decoded['user_info'], {'anonymous': True})
self.assertIn('exp', decoded)
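        # The session cookie payload is the JWT itself; decoding it with the
        # site secret, as above, mirrors what getSession() does per request.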
| sbidoul/buildbot | master/buildbot/test/unit/test_www_service.py | Python | gpl-2.0 | 10,362 |
import unittest
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from pulp.server.webservices.urls import handler404
def assert_url_match(expected_url, url_name, *args, **kwargs):
"""
Generate a url given args and kwargs and pass it through Django's reverse and
resolve functions.
Example use to match a url /v2/tasks/<task_argument>/:
assert_url_match('/v2/tasks/example_arg/', 'tasks', task_argument='example_arg')
:param expected_url: the url that should be generated given a url_name and args
:type expected_url: str
:param url_name : name given to a url as defined in the urls.py
:type url_name : str
:param args : optional positional arguments to place into a url's parameters
as specified by urls.py
:type args : tuple
:param kwargs : optional named arguments to place into a url's parameters as
specified by urls.py
:type kwargs : dict
"""
try:
# Invalid arguments will cause a NoReverseMatch.
url = reverse(url_name, args=args, kwargs=kwargs)
except NoReverseMatch:
        raise AssertionError(
            "Name: '{0}' could not match a url with args '{1}' "
            "and kwargs '{2}'".format(url_name, args, kwargs)
        )
else:
# If the url exists but is not the expected url.
if url != expected_url:
raise AssertionError(
'url {0} not equal to expected url {1}'.format(url, expected_url))
# Run this url back through resolve and ensure that it matches the url_name.
matched_view = resolve(url)
if matched_view.url_name != url_name:
raise AssertionError('Url name {0} not equal to expected url name {1}'.format(
matched_view.url_name, url_name)
)
class TestNotFoundHandler(unittest.TestCase):
def test_not_found_handler(self):
"""
Test that the handler404 module attribute is set as expected.
"""
self.assertEqual(handler404, 'pulp.server.webservices.views.util.page_not_found')
class TestDjangoContentUrls(unittest.TestCase):
"""
Test the matching of the content urls
"""
def test_match_content_catalog_resource(self):
"""
Test url matching for content_catalog_resource.
"""
url = '/v2/content/catalog/mock-source/'
url_name = 'content_catalog_resource'
assert_url_match(url, url_name, source_id='mock-source')
def test_match_content_orphan_collection(self):
"""
Test url matching for content_orphan_collection.
"""
url = '/v2/content/orphans/'
url_name = 'content_orphan_collection'
assert_url_match(url, url_name)
def test_match_content_units_collection(self):
"""
Test the url matching for content_units_collection.
"""
url = '/v2/content/units/mock-type/'
url_name = 'content_units_collection'
assert_url_match(url, url_name, type_id='mock-type')
def test_match_content_unit_search(self):
"""
Test the url matching for content_unit_search.
"""
url = '/v2/content/units/mock-type/search/'
url_name = 'content_unit_search'
assert_url_match(url, url_name, type_id='mock-type')
def test_match_content_unit_resource(self):
"""
Test url matching for content_unit_resource.
"""
url = '/v2/content/units/mock-type/mock-unit/'
url_name = 'content_unit_resource'
assert_url_match(url, url_name, type_id='mock-type', unit_id='mock-unit')
def test_match_content_unit_user_metadata_resource(self):
"""
Test url matching for content_unit_user_metadata_resource.
"""
url = '/v2/content/units/mock-type/mock-unit/pulp_user_metadata/'
url_name = 'content_unit_user_metadata_resource'
assert_url_match(url, url_name, type_id='mock-type', unit_id='mock-unit')
def test_match_content_upload_resource(self):
"""
Test url matching for content_upload_resource.
"""
url = '/v2/content/uploads/mock-upload/'
url_name = 'content_upload_resource'
assert_url_match(url, url_name, upload_id='mock-upload')
def test_match_content_upload_segment_resource(self):
"""
Test Url matching for content_upload_segment_resource.
"""
url = '/v2/content/uploads/mock-upload-id/8/'
url_name = 'content_upload_segment_resource'
assert_url_match(url, url_name, upload_id='mock-upload-id', offset='8')
def test_match_content_actions_delete_orphans(self):
"""
Test url matching for content_actions_delete_orphans.
"""
url = '/v2/content/actions/delete_orphans/'
url_name = 'content_actions_delete_orphans'
assert_url_match(url, url_name)
def test_match_content_orphan_resource(self):
"""
Test url matching for content_orphan_resource.
"""
url = '/v2/content/orphans/mock-type/mock-unit/'
url_name = 'content_orphan_resource'
assert_url_match(url, url_name, content_type='mock-type', unit_id='mock-unit')
def test_match_content_orphan_type_subcollection(self):
"""
Test url matching for content_orphan_type_subcollection.
"""
url = '/v2/content/orphans/mock_type/'
url_name = 'content_orphan_type_subcollection'
assert_url_match(url, url_name, content_type='mock_type')
def test_match_content_uploads(self):
"""
Test url matching for content_uploads.
"""
url = '/v2/content/uploads/'
url_name = 'content_uploads'
assert_url_match(url, url_name)
class TestDjangoPluginsUrls(unittest.TestCase):
"""
Test url matching for plugins urls.
"""
def test_match_distributor_resource_view(self):
"""
Test the url matching for the distributor resource view.
"""
url = '/v2/plugins/distributors/mock_distributor/'
url_name = 'plugin_distributor_resource'
assert_url_match(url, url_name, distributor_id='mock_distributor')
def test_match_distributors_view(self):
"""
Test the url matching for the Distributors view.
"""
url = '/v2/plugins/distributors/'
url_name = 'plugin_distributors'
assert_url_match(url, url_name)
def test_match_importer_resource_view(self):
"""
Test the url matching for plugin_importer_resource
"""
url = '/v2/plugins/importers/mock_importer_id/'
url_name = 'plugin_importer_resource'
assert_url_match(url, url_name, importer_id='mock_importer_id')
def test_match_importers_view(self):
"""
Test the url matching for the Importers view
"""
url = '/v2/plugins/importers/'
url_name = 'plugin_importers'
assert_url_match(url, url_name)
def test_match_type_resource_view(self):
"""
Test the url matching for the TypeResourceView.
"""
url = '/v2/plugins/types/type_id/'
url_name = 'plugin_type_resource'
assert_url_match(url, url_name, type_id='type_id')
def test_match_types_view(self):
"""
Test url matching for plugin_types.
"""
url = '/v2/plugins/types/'
url_name = 'plugin_types'
assert_url_match(url, url_name)
class TestDjangoLoginUrls(unittest.TestCase):
"""
Tests for root_actions urls.
"""
def test_match_login_view(self):
"""
Test url match for login.
"""
url = '/v2/actions/login/'
url_name = 'login'
assert_url_match(url, url_name)
class TestDjangoConsumerGroupsUrls(unittest.TestCase):
"""
Tests for consumer_groups urls
"""
def test_match_consumer_group_view(self):
"""
Test url matching for consumer_groups
"""
url = '/v2/consumer_groups/'
url_name = 'consumer_group'
assert_url_match(url, url_name)
def test_match_consumer_group_search_view(self):
"""
Test url matching for consumer_group_search
"""
url = '/v2/consumer_groups/search/'
url_name = 'consumer_group_search'
assert_url_match(url, url_name)
def test_match_consumer_group_resource_view(self):
"""
Test url matching for single consumer_group
"""
url = '/v2/consumer_groups/test-group/'
url_name = 'consumer_group_resource'
assert_url_match(url, url_name, consumer_group_id='test-group')
def test_match_consumer_group_associate_action_view(self):
"""
Test url matching for consumer_groups association
"""
url = '/v2/consumer_groups/test-group/actions/associate/'
url_name = 'consumer_group_associate'
assert_url_match(url, url_name, consumer_group_id='test-group')
def test_match_consumer_group_unassociate_action_view(self):
"""
Test url matching for consumer_groups unassociation
"""
url = '/v2/consumer_groups/test-group/actions/unassociate/'
url_name = 'consumer_group_unassociate'
assert_url_match(url, url_name, consumer_group_id='test-group')
def test_match_consumer_group_content_action_install_view(self):
"""
Test url matching for consumer_groups content installation
"""
url = '/v2/consumer_groups/test-group/actions/content/install/'
url_name = 'consumer_group_content'
assert_url_match(url, url_name, consumer_group_id='test-group', action='install')
def test_match_consumer_group_content_action_update_view(self):
"""
Test url matching for consumer_groups content update
"""
url = '/v2/consumer_groups/test-group/actions/content/update/'
url_name = 'consumer_group_content'
assert_url_match(url, url_name, consumer_group_id='test-group', action='update')
def test_match_consumer_group_content_action_uninstall_view(self):
"""
Test url matching for consumer_groups content uninstall
"""
url = '/v2/consumer_groups/test-group/actions/content/uninstall/'
url_name = 'consumer_group_content'
assert_url_match(url, url_name, consumer_group_id='test-group', action='uninstall')
def test_match_consumer_group_bindings_view(self):
"""
Test url matching for consumer_groups bindings
"""
url = '/v2/consumer_groups/test-group/bindings/'
url_name = 'consumer_group_bind'
assert_url_match(url, url_name, consumer_group_id='test-group')
def test_match_consumer_group_binding_view(self):
"""
Test url matching for consumer_groups binding removal
"""
url = '/v2/consumer_groups/test-group/bindings/repo1/dist1/'
url_name = 'consumer_group_unbind'
assert_url_match(url, url_name, consumer_group_id='test-group',
repo_id='repo1', distributor_id='dist1')
class TestDjangoRepositoriesUrls(unittest.TestCase):
"""
Test url matching for repositories urls.
"""
def test_match_repos(self):
"""
Test url matching for repos.
"""
url = '/v2/repositories/'
url_name = 'repos'
assert_url_match(url, url_name)
def test_match_repo_search(self):
"""
Test url matching for repo_search.
"""
url = '/v2/repositories/search/'
url_name = 'repo_search'
assert_url_match(url, url_name)
def test_match_repo_content_app_regen(self):
"""
Test url matching for repo_content_app_regen.
"""
url_name = 'repo_content_app_regen'
url = '/v2/repositories/actions/content/regenerate_applicability/'
assert_url_match(url, url_name)
def test_match_repo_resource(self):
"""
Test url matching for repo_resource.
"""
url_name = 'repo_resource'
url = '/v2/repositories/mock_repo/'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_unit_search(self):
"""
Test url matching for repo_unit_search.
"""
url_name = 'repo_unit_search'
url = '/v2/repositories/mock_repo/search/units/'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_importers(self):
"""
Test url matching for repo_importers.
"""
url_name = 'repo_importers'
url = '/v2/repositories/mock_repo/importers/'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_importer_resource(self):
"""
Test url matching for repo_importer_resource.
"""
url = '/v2/repositories/mock_repo/importers/mock_importer/'
url_name = 'repo_importer_resource'
assert_url_match(url, url_name, repo_id='mock_repo', importer_id='mock_importer')
def test_match_repo_sync_schedule_collection(self):
"""
Test url matching for repo_sync_schedules.
"""
url = '/v2/repositories/mock_repo/importers/mock_importer/schedules/sync/'
url_name = 'repo_sync_schedules'
assert_url_match(url, url_name, repo_id='mock_repo', importer_id='mock_importer')
def test_match_repo_sync_schedule_resource(self):
"""
Test url matching for repo_sync_schedule_resource.
"""
url = '/v2/repositories/mock_repo/importers/mock_importer/schedules/sync/mock_schedule/'
url_name = 'repo_sync_schedule_resource'
assert_url_match(url, url_name, repo_id='mock_repo', importer_id='mock_importer',
schedule_id='mock_schedule')
def test_match_repo_distributors(self):
"""
Test url matching for repo_distributors.
"""
url = '/v2/repositories/mock_repo/distributors/'
url_name = 'repo_distributors'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_distributor_resource(self):
"""
Test url matching for repo_distributor_resource.
"""
url = '/v2/repositories/mock_repo/distributors/mock_distributor/'
url_name = 'repo_distributor_resource'
assert_url_match(url, url_name, repo_id='mock_repo', distributor_id='mock_distributor')
def test_match_repo_publish_schedules(self):
"""
Test url matching for repo_publish_schedules.
"""
url = '/v2/repositories/mock_repo/distributors/mock_distributor/schedules/publish/'
url_name = 'repo_publish_schedules'
assert_url_match(url, url_name, repo_id='mock_repo', distributor_id='mock_distributor')
def test_match_repo_publish_schedule_resource(self):
"""
Test url matching for repo_publish_schedule_resource.
"""
url = '/v2/repositories/mock_repo/distributors/'\
'mock_distributor/schedules/publish/mock_schedule/'
url_name = 'repo_publish_schedule_resource'
assert_url_match(url, url_name, repo_id='mock_repo', distributor_id='mock_distributor',
schedule_id='mock_schedule')
def test_match_repo_sync_history(self):
"""
Test url matching for repo_sync_history.
"""
url = '/v2/repositories/mock_repo/history/sync/'
url_name = 'repo_sync_history'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_sync(self):
"""
Test url matching for repo_sync.
"""
url = '/v2/repositories/mock_repo/actions/sync/'
url_name = 'repo_sync'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_download(self):
"""
Test url matching for repo_download.
"""
url = '/v2/repositories/mock_repo/actions/download/'
url_name = 'repo_download'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_publish_history(self):
"""
Test url matching for repo_publish_history.
"""
url = '/v2/repositories/mock_repo/history/publish/mock_dist/'
url_name = 'repo_publish_history'
assert_url_match(url, url_name, repo_id='mock_repo', distributor_id='mock_dist')
def test_match_repo_publish(self):
"""
Test url matching for repo_publish.
"""
url = '/v2/repositories/mock_repo/actions/publish/'
url_name = 'repo_publish'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_associate(self):
"""
Test url matching for repo_associate.
"""
url = '/v2/repositories/mock_repo/actions/associate/'
url_name = 'repo_associate'
assert_url_match(url, url_name, dest_repo_id='mock_repo')
def test_match_repo_unassociate(self):
"""
Test url matching for repo_unassociate.
"""
url = '/v2/repositories/mock_repo/actions/unassociate/'
url_name = 'repo_unassociate'
assert_url_match(url, url_name, repo_id='mock_repo')
def test_match_repo_import_upload(self):
"""
Test url matching for repo_import_upload.
"""
url = '/v2/repositories/mock_repo/actions/import_upload/'
url_name = 'repo_import_upload'
assert_url_match(url, url_name, repo_id='mock_repo')
class TestDjangoRepoGroupsUrls(unittest.TestCase):
"""
Test url matching for repo_groups urls
"""
def test_match_repo_groups(self):
"""Test url matching for repo_groups."""
url = '/v2/repo_groups/'
url_name = 'repo_groups'
assert_url_match(url, url_name)
def test_match_repo_group_search(self):
"""Test url matching for repo_group_search."""
url = '/v2/repo_groups/search/'
url_name = 'repo_group_search'
assert_url_match(url, url_name)
def test_match_repo_group_resource(self):
url = '/v2/repo_groups/test-group-id/'
url_name = 'repo_group_resource'
assert_url_match(url, url_name, repo_group_id='test-group-id')
def test_match_repo_group_associate(self):
url = '/v2/repo_groups/test-group-id/actions/associate/'
url_name = 'repo_group_associate'
assert_url_match(url, url_name, repo_group_id='test-group-id')
def test_match_repo_group_unassociate(self):
url = '/v2/repo_groups/test-group-id/actions/unassociate/'
url_name = 'repo_group_unassociate'
assert_url_match(url, url_name, repo_group_id='test-group-id')
def test_match_repo_group_distributors(self):
url = '/v2/repo_groups/test-group-id/distributors/'
url_name = 'repo_group_distributors'
assert_url_match(url, url_name, repo_group_id='test-group-id')
def test_match_repo_group_distributor_resource(self):
url = '/v2/repo_groups/test-group-id/distributors/test-distributor/'
url_name = 'repo_group_distributor_resource'
assert_url_match(url, url_name, repo_group_id='test-group-id',
distributor_id='test-distributor')
def test_repo_group_publish(self):
url = '/v2/repo_groups/test-group-id/actions/publish/'
url_name = 'repo_group_publish'
assert_url_match(url, url_name, repo_group_id='test-group-id')
class TestDjangoTasksUrls(unittest.TestCase):
"""
Test the matching for tasks urls.
"""
def test_match_task_collection(self):
"""
Test the matching for task_collection.
"""
url = '/v2/tasks/'
url_name = 'task_collection'
assert_url_match(url, url_name)
def test_match_task_resource(self):
"""
Test the matching for task_resource.
"""
url = '/v2/tasks/test-task/'
url_name = 'task_resource'
assert_url_match(url, url_name, task_id='test-task')
def test_match_task_search(self):
"""
        Test the matching for task_search.
"""
url = '/v2/tasks/search/'
url_name = 'task_search'
assert_url_match(url, url_name)
class TestDjangoRolesUrls(unittest.TestCase):
"""
Tests for roles urls.
"""
def test_match_roles_view(self):
"""
Test url match for roles.
"""
url = '/v2/roles/'
url_name = 'roles'
assert_url_match(url, url_name)
def test_match_role_resource_view(self):
"""
Test url matching for single role.
"""
url = '/v2/roles/test-role/'
url_name = 'role_resource'
assert_url_match(url, url_name, role_id='test-role')
def test_match_role_users_view(self):
"""
Test url matching for role's users.
"""
url = '/v2/roles/test-role/users/'
url_name = 'role_users'
assert_url_match(url, url_name, role_id='test-role')
def test_match_role_user_view(self):
"""
Test url matching for role's user.
"""
url = '/v2/roles/test-role/users/test-login/'
url_name = 'role_user'
assert_url_match(url, url_name, role_id='test-role', login='test-login')
class TestDjangoPermissionsUrls(unittest.TestCase):
"""
Tests for permissions urls
"""
def test_match_permissions_view(self):
"""
Test url matching for permissions
"""
url = '/v2/permissions/'
url_name = 'permissions'
assert_url_match(url, url_name)
def test_match_permission_grant_to_role_view(self):
"""
Test url matching for grant permissions to a role
"""
url = '/v2/permissions/actions/grant_to_role/'
url_name = 'grant_to_role'
assert_url_match(url, url_name)
def test_match_permission_grant_to_user_view(self):
"""
Test url matching for grant permissions to a user
"""
url = '/v2/permissions/actions/grant_to_user/'
url_name = 'grant_to_user'
assert_url_match(url, url_name)
def test_match_permission_revoke_from_role_view(self):
"""
Test url matching for revoke permissions from a role
"""
url = '/v2/permissions/actions/revoke_from_role/'
url_name = 'revoke_from_role'
assert_url_match(url, url_name)
def test_match_permission_revoke_from_userview(self):
"""
Test url matching for revoke permissions from a user
"""
url = '/v2/permissions/actions/revoke_from_user/'
url_name = 'revoke_from_user'
assert_url_match(url, url_name)
class TestDjangoEventListenersUrls(unittest.TestCase):
"""
Tests for events urls
"""
def test_match_event_listeners_view(self):
"""
Test url matching for event_listeners
"""
url = '/v2/events/'
url_name = 'events'
assert_url_match(url, url_name)
def test_match_event_listeners_resource_view(self):
"""
Test url matching for single event_listener
"""
url = '/v2/events/12345/'
url_name = 'event_resource'
assert_url_match(url, url_name, event_listener_id='12345')
class TestDjangoUsersUrls(unittest.TestCase):
"""
    Tests for users urls
"""
def test_match_users_view(self):
"""
Test url matching for users
"""
url = '/v2/users/'
url_name = 'users'
assert_url_match(url, url_name)
def test_match_user_search_view(self):
"""
Test url matching for user search.
"""
url = '/v2/users/search/'
url_name = 'user_search'
assert_url_match(url, url_name)
def test_match_user_resource(self):
"""
Test the matching for user resource.
"""
url = '/v2/users/user_login/'
url_name = 'user_resource'
assert_url_match(url, url_name, login='user_login')
class TestStatusUrl(unittest.TestCase):
"""
Tests for server status url
"""
def test_match_status_view(self):
"""
Test url matching for status
"""
url = '/v2/status/'
url_name = 'status'
assert_url_match(url, url_name)
class TestDjangoConsumersUrls(unittest.TestCase):
"""
Tests for consumers urls
"""
def test_match_consumers_view(self):
"""
Test url matching for consumer
"""
url = '/v2/consumers/'
url_name = 'consumers'
assert_url_match(url, url_name)
def test_match_consumer_search(self):
"""
Test url matching for consumer_search.
"""
url = '/v2/consumers/search/'
url_name = 'consumer_search'
assert_url_match(url, url_name)
def test_match_consumer_resource_view(self):
"""
Test url matching for consumer resource.
"""
url = '/v2/consumers/test-consumer/'
url_name = 'consumer_resource'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_search_view(self):
"""
Test url matching for consumer search.
"""
url = '/v2/consumers/search/'
url_name = 'consumer_search'
assert_url_match(url, url_name)
def test_match_consumer_binding_search_view(self):
"""
Test url matching for consumer binding search.
"""
url = '/v2/consumers/binding/search/'
url_name = 'consumer_binding_search'
assert_url_match(url, url_name)
def test_match_consumer_profile_search_view(self):
"""
Test url matching for consumer profile search.
"""
url = '/v2/consumers/profile/search/'
url_name = 'consumer_profile_search'
assert_url_match(url, url_name)
def test_match_consumer_profiles_view(self):
"""
Test url matching for consumer profiles
"""
url = '/v2/consumers/test-consumer/profiles/'
url_name = 'consumer_profiles'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_profile_resource_view(self):
"""
Test url matching for consumer profile resource
"""
url = '/v2/consumers/test-consumer/profiles/some-profile/'
url_name = 'consumer_profile_resource'
assert_url_match(url, url_name, consumer_id='test-consumer', content_type='some-profile')
def test_match_consumer_bindings_view(self):
"""
Test url matching for consumer bindings
"""
url = '/v2/consumers/test-consumer/bindings/'
url_name = 'bindings'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_binding_resource_view(self):
"""
Test url matching for consumer binding resource
"""
url = '/v2/consumers/test-consumer/bindings/some-repo/some-dist/'
url_name = 'consumer_binding_resource'
assert_url_match(url, url_name, consumer_id='test-consumer', repo_id='some-repo',
distributor_id='some-dist')
def test_match_consumer_binding_repo_view(self):
"""
Test url matching for consumer and repo binding
"""
url = '/v2/consumers/test-consumer/bindings/some-repo/'
url_name = 'bindings_repo'
assert_url_match(url, url_name, consumer_id='test-consumer', repo_id='some-repo')
def test_match_consumer_appicability_regen_view(self):
"""
        Test url matching for consumer applicability regeneration
"""
url = '/v2/consumers/test-consumer/actions/content/regenerate_applicability/'
url_name = 'consumer_appl_regen'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_content_action_install_view(self):
"""
Test url matching for consumer content installation
"""
url = '/v2/consumers/test-consumer/actions/content/install/'
url_name = 'consumer_content'
assert_url_match(url, url_name, consumer_id='test-consumer', action='install')
def test_match_consumer_content_action_update_view(self):
"""
Test url matching for consumer content update
"""
url = '/v2/consumers/test-consumer/actions/content/update/'
url_name = 'consumer_content'
assert_url_match(url, url_name, consumer_id='test-consumer', action='update')
def test_match_consumer_content_action_uninstall_view(self):
"""
Test url matching for consumer content uninstall
"""
url = '/v2/consumers/test-consumer/actions/content/uninstall/'
url_name = 'consumer_content'
assert_url_match(url, url_name, consumer_id='test-consumer', action='uninstall')
def test_match_consumers_appicability_regen_view(self):
"""
        Test url matching for consumers applicability regeneration
"""
url = '/v2/consumers/actions/content/regenerate_applicability/'
url_name = 'appl_regen'
assert_url_match(url, url_name)
def test_match_consumer_query_appicability_view(self):
"""
Test url matching for consumer query applicability
"""
url = '/v2/consumers/content/applicability/'
url_name = 'consumer_query_appl'
assert_url_match(url, url_name)
def test_match_consumer_schedule_content_action_install_view(self):
"""
Test url matching for consumer schedule content installation
"""
url = '/v2/consumers/test-consumer/schedules/content/install/'
url_name = 'schedule_content_install'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_schedule_content_action_update_view(self):
"""
Test url matching for consumer schedule content update
"""
url = '/v2/consumers/test-consumer/schedules/content/update/'
url_name = 'schedule_content_update'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_schedule_content_action_uninstall_view(self):
"""
Test url matching for consumer schedule content uninstall
"""
url = '/v2/consumers/test-consumer/schedules/content/uninstall/'
url_name = 'schedule_content_uninstall'
assert_url_match(url, url_name, consumer_id='test-consumer')
def test_match_consumer_schedule_content_action_install_resource_view(self):
"""
Test url matching for consumer schedule content resource installation
"""
url = '/v2/consumers/test-consumer/schedules/content/install/12345/'
url_name = 'schedule_content_install_resource'
assert_url_match(url, url_name, consumer_id='test-consumer', schedule_id='12345')
def test_match_consumer_schedule_content_action_update_resource_view(self):
"""
Test url matching for consumer schedule content resource update
"""
url = '/v2/consumers/test-consumer/schedules/content/update/12345/'
url_name = 'schedule_content_update_resource'
assert_url_match(url, url_name, consumer_id='test-consumer', schedule_id='12345')
def test_match_consumer_schedule_content_action_uninstall_resource_view(self):
"""
Test url matching for consumer schedule content resource uninstall
"""
url = '/v2/consumers/test-consumer/schedules/content/uninstall/12345/'
url_name = 'schedule_content_uninstall_resource'
assert_url_match(url, url_name, consumer_id='test-consumer', schedule_id='12345')
def test_match_consumer_history_view(self):
"""
Test url matching for consumer history
"""
url = '/v2/consumers/test-consumer/history/'
url_name = 'consumer_history'
assert_url_match(url, url_name, consumer_id='test-consumer')
class TestDjangoContentSourcesUrls(unittest.TestCase):
"""
Tests for content sources.
"""
def test_match_content_sources_view(self):
"""
Test url matching for content sources.
"""
url = '/v2/content/sources/'
url_name = 'content_sources'
assert_url_match(url, url_name)
def test_match_content_sources_resource(self):
"""
Test the matching for content sources resource.
"""
url = '/v2/content/sources/some-source/'
url_name = 'content_sources_resource'
assert_url_match(url, url_name, source_id='some-source')
def test_match_content_sources_refresh_view(self):
"""
Test url matching for content sources refresh.
"""
url = '/v2/content/sources/action/refresh/'
url_name = 'content_sources_action'
assert_url_match(url, url_name, action='refresh')
def test_match_content_sources_resource_refresh(self):
"""
Test the matching for content sources resource refresh.
"""
url = '/v2/content/sources/some-source/action/refresh/'
url_name = 'content_sources_resource_action'
assert_url_match(url, url_name, source_id='some-source', action='refresh')
| ulif/pulp | server/test/unit/server/webservices/test_urls.py | Python | gpl-2.0 | 33,452 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from datetime import date, datetime
import six
from wtforms import DateField
from wtforms.validators import optional
from ..field_base import WebDepositField
__all__ = ['Date']
class Date(WebDepositField, DateField):
def __init__(self, **kwargs):
defaults = dict(
icon='calendar',
validators=[optional()],
widget_classes="form-control"
)
defaults.update(kwargs)
super(Date, self).__init__(**defaults)
def process_data(self, value):
"""
Called when loading data from Python (incoming objects can be either
        datetime objects or strings, depending on whether they were loaded
        from JSON or from Python objects).
"""
if isinstance(value, six.string_types):
self.object_data = datetime.strptime(value, self.format).date()
elif isinstance(value, datetime):
self.object_data = value.date()
elif isinstance(value, date):
self.object_data = value
# Be sure to set both self.object_data and self.data due to internals
# of Field.process() and draft_form_process_and_validate().
self.data = self.object_data
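    # Illustrative sketch (not part of the original module; `f` stands for a
    # bound Date field instance and self.format is assumed to be the WTForms
    # default '%Y-%m-%d'):
    #
    #     from datetime import date, datetime
    #     f.process_data(u'2013-05-08')                # string   -> date(2013, 5, 8)
    #     f.process_data(datetime(2013, 5, 8, 12, 0))  # datetime -> date(2013, 5, 8)
    #     f.process_data(date(2013, 5, 8))             # date     -> stored as given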
@property
def json_data(self):
"""
        Serialize data into a JSON-serializable object
"""
# Just use _value() to format the date into a string.
if self.data:
            return self.data.strftime(self.format)
return None
| quantifiedcode-bot/invenio-deposit | invenio_deposit/fields/date.py | Python | gpl-2.0 | 2,268 |
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, u'='), value])
def LParen():
return Leaf(token.LPAR, u"(")
def RParen():
return Leaf(token.RPAR, u")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = u" "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
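# Illustrative sketch (not part of this module): building an assignment and
# rendering it back to source text.
#
#     node = Assign(Name(u"x"), Number(u"1"))
#     str(node)  # -> "x = 1" (Assign inserts the space before the source)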
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, u",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, u".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, u"\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, u"")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, u'['),
index_node,
Leaf(token.RBRACE, u']')])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = u""
fp.prefix = u" "
it.prefix = u" "
for_leaf = Leaf(token.NAME, u"for")
for_leaf.prefix = u" "
in_leaf = Leaf(token.NAME, u"in")
in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = u" "
if_leaf = Leaf(token.NAME, u"if")
if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, u"["),
inner,
Leaf(token.RBRACE, u"]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, u'from'),
Leaf(token.NAME, package_name, prefix=u" "),
Leaf(token.NAME, u'import', prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
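# Illustrative sketch (not part of this module); the prefix supplies the
# space after `import`:
#
#     node = FromImport(u"collections", [Name(u"deque", prefix=u" ")])
#     str(node)  # -> "from collections import deque"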
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == u"("
and node.children[2].value == u")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == u"["
and node.children[-1].value == u"]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
    use this to iterate over all objects in the chain. Iteration is
    terminated when getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
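# Illustrative sketch (not part of this module): walking a node's ancestors,
# which is exactly how in_special_context() below consumes this generator.
#
#     for ancestor in attr_chain(node, "parent"):
#         print ancestor.type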
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
        of it is being iterable (i.e. it doesn't matter if it returns a list
        or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
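# For example (illustrative, not part of this module): for the leaf matching
# `x` in `list(x)` or in `for i in x:`, in_special_context(x_node) returns
# True, so a fixer may leave `x` as an iterator; for a bare `y = x` it
# returns False.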
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
assert node.parent, "Tree is insane! root found before "\
"file_input node was found."
node = node.parent
return node
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
binding = find_binding(name, find_root(node), package)
return bool(binding)
def is_import(node):
"""Returns true if the node is an import statement."""
return node.type in (syms.import_name, syms.import_from)
def touch_import(package, name, node):
""" Works like `does_tree_import` but adds an import statement
if it was not imported. """
def is_import_stmt(node):
return node.type == syms.simple_stmt and node.children and \
is_import(node.children[0])
root = find_root(node)
if does_tree_import(package, name, root):
return
# figure out where to insert the new import. First try to find
# the first import and then skip to the last one.
insert_pos = offset = 0
for idx, node in enumerate(root.children):
if not is_import_stmt(node):
continue
for offset, node2 in enumerate(root.children[idx:]):
if not is_import_stmt(node2):
break
insert_pos = idx + offset
break
# if there are no imports where we can insert, find the docstring.
# if that also fails, we stick to the beginning of the file
if insert_pos == 0:
for idx, node in enumerate(root.children):
if node.type == syms.simple_stmt and node.children and \
node.children[0].type == token.STRING:
insert_pos = idx + 1
break
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, u'import'),
Leaf(token.NAME, name, prefix=u' ')
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u' ')])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
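# Illustrative sketch (not part of this module):
#
#     touch_import(None, u"sys", node)             # ensures `import sys`
#     touch_import(u"functools", u"reduce", node)  # ensures `from functools import reduce`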
_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if is_import(ret):
return ret
return None
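# Illustrative sketch (not part of this module): given the tree for
# `from os import path`, both calls below return the import_from node.
#
#     find_binding(u"path", find_root(node))
#     find_binding(u"path", find_root(node), package=u"os")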
_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# unicode(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find(u'as', n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
| 2ndy/RaspIM | usr/lib/python2.6/lib2to3/fixer_util.py | Python | gpl-2.0 | 14,225 |
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import BasecampProvider
class BasecampOAuth2Adapter(OAuth2Adapter):
provider_id = BasecampProvider.id
access_token_url = 'https://launchpad.37signals.com/authorization/token?type=web_server' # noqa
authorize_url = 'https://launchpad.37signals.com/authorization/new'
profile_url = 'https://launchpad.37signals.com/authorization.json'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
resp = requests.get(self.profile_url, headers=headers)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(BasecampOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(BasecampOAuth2Adapter)
| Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/allauth/socialaccount/providers/basecamp/views.py | Python | gpl-3.0 | 1,123 |
#! /usr/bin/env python2
"""
discovery-wrapper A small tool which wraps around discovery and tries to
guide the discovery process with a more modern approach with a
Queue and workers.
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <[email protected]>
Date: Sep 2016
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 1 thread.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
try:
import json
import os
import Queue
import subprocess
import sys
import threading
import time
except:
print "ERROR: missing one or more of the following python modules:"
print "threading, Queue, sys, subprocess, time, os, json"
sys.exit(2)
try:
import MySQLdb
except:
print "ERROR: missing the mysql python module:"
print "On ubuntu: apt-get install python-mysqldb"
print "On FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean"
sys.exit(2)
"""
Fetch configuration details from the config_to_json.php script
"""
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
print "ERROR: Could not execute: %s" % config_cmd
sys.exit(2)
return proc.communicate()[0]
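# Hypothetical shape of the JSON blob returned above, limited to the keys
# this script actually reads (all values are placeholders):
#
# {"install_dir": "/opt/librenms", "db_user": "librenms", "db_pass": "secret",
#  "db_host": "localhost", "db_port": "3306", "db_socket": "", "db_name": "librenms"}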
try:
with open(config_file) as f:
pass
except IOError as e:
print "ERROR: Oh dear... %s does not seem readable" % config_file
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
print "ERROR: Could not load or parse configuration, are PATHs correct?"
sys.exit(2)
discovery_path = config['install_dir'] + '/discovery.php'
db_username = config['db_user']
db_password = config['db_pass']
db_port = int(config['db_port'])
if config['db_socket']:
db_server = config['db_host']
db_socket = config['db_socket']
else:
db_server = config['db_host']
db_socket = None
db_dbname = config['db_name']
def db_open():
try:
if db_socket:
db = MySQLdb.connect(host=db_server, unix_socket=db_socket, user=db_username, passwd=db_password, db=db_dbname)
else:
db = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
return db
except:
print "ERROR: Could not connect to MySQL database!"
sys.exit(2)
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC1
if 'distributed_poller_group' in config:
discovery_group = str(config['distributed_poller_group'])
else:
discovery_group = False
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('discovery.ping.' + key, key, 60)
if memc.get('discovery.ping.' + key) == key:
memc.delete('discovery.ping.' + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("discovery.master")) == config['distributed_poller_name']:
print "This system is already joined as the discovery master."
sys.exit(2)
if memc_alive():
if memc.get("discovery.master") is None:
print "Registered as Master"
memc.set("discovery.master", config['distributed_poller_name'], 30)
memc.set("discovery.nodes", 0, 3600)
IsNode = False
else:
print "Registered as Node joining Master %s" % memc.get("discovery.master")
IsNode = True
memc.incr("discovery.nodes")
distdisco = True
else:
print "Could not connect to memcached, disabling distributed discovery."
distdisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print "ERROR: missing memcache python module:"
print "On deb systems: apt-get install python-memcache"
print "On other systems: easy_install python-memcached"
print "Disabling distributed discovery."
distdisco = False
else:
distdisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
discovered_devices = 0
"""
Take the number of threads we want to run in parallel from the command line;
if none is given or the argument is garbage, fall back to the default of 1.
"""
try:
amount_of_workers = int(sys.argv[1])
if amount_of_workers == 0:
print "ERROR: 0 threads is not a valid value"
sys.exit(2)
except:
amount_of_workers = 1
devices_list = []
"""
This query specifically orders the results by the last_polled_timetaken variable.
This way, the devices likely to be slow are put at the top of the queue,
increasing our chances of completing _all_ the work in exactly the time it takes to
discover the slowest device. Cool stuff, eh?
"""
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC2
if discovery_group is not False:
query = "select device_id from devices where poller_group IN(" + discovery_group + ") and disabled = 0 order by last_polled_timetaken desc"
else:
query = "select device_id from devices where disabled = 0 order by last_polled_timetaken desc"
# EOC2
db = db_open()
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC3
if distdisco and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0]
minlocks = devices[0][1]
# EOC3
db.close()
"""
A separate queue and a single worker for printing information to the screen prevent
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC4
global IsNode
global distdisco
if distdisco:
if not IsNode:
memc_touch('discovery.master', 30)
nodes = memc.get('discovery.nodes')
if nodes is None and not memc_alive():
print "WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
distdisco = False
nodes = nodeso
                if nodes != nodeso:  # compare values, not identities
print "INFO: %s Node(s) Total" % (nodes)
nodeso = nodes
else:
memc_touch('discovery.nodes', 30)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global discovered_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
discovered_devices += 1
if elapsed_time < 300:
print "INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
else:
print "WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time)
print_queue.task_done()
"""
This class will fork off single instances of the discovery.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC5
if not distdisco or memc.get('discovery.device.' + str(device_id)) is None:
if distdisco:
result = memc.add('discovery.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print "This device (%s) appears to be being discovered by another discovery node" % (device_id)
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print "Lost Memcached, Not discovering Device %s as Node. Master will discover it." % device_id
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
command = "/usr/bin/env php %s -h %s >> /dev/null 2>&1" % (discovery_path, device_id)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
poll_queue = Queue.Queue()
print_queue = Queue.Queue()
print "INFO: starting the discovery at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers)
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print "INFO: discovery-wrapper polled %s devices in %s seconds with %s workers" % (discovered_devices, total_time, amount_of_workers)
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC6
if distdisco or memc_alive():
master = memc.get("discovery.master")
if master == config['distributed_poller_name'] and not IsNode:
print "Wait for all discovery-nodes to finish"
nodes = memc.get("discovery.nodes")
while nodes > 0 and nodes is not None:
try:
time.sleep(1)
nodes = memc.get("discovery.nodes")
except:
pass
print "Clearing Locks"
x = minlocks
while x <= maxlocks:
memc.delete('discovery.device.' + str(x))
x = x + 1
print "%s Locks Cleared" % x
print "Clearing Nodes"
memc.delete("discovery.master")
memc.delete("discovery.nodes")
else:
memc.decr("discovery.nodes")
print "Finished %s." % time.time()
# EOC6
show_stopper = False
if total_time > 21600:
print "WARNING: the process took more than 6 hours to finish, you need faster hardware or more threads"
print "INFO: in sequential style discovery the elapsed time would have been: %s seconds" % real_duration
for device in per_device_duration:
if per_device_duration[device] > 3600:
print "WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device])
show_stopper = True
if show_stopper:
print "ERROR: Some devices are taking more than 3600 seconds, the script cannot recommend you what to do."
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print "WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend
sys.exit(2)
| wiad/librenms | discovery-wrapper.py | Python | gpl-3.0 | 13,314 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
import boto.jsonresponse
from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
class Layer1(AWSQueryConnection):
APIVersion = '2010-12-01'
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None,
proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None, security_token=None, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token, profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _get_response(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
def check_dns_availability(self, cname_prefix):
"""Checks if the specified CNAME is available.
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
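    # Illustrative usage sketch (not part of the original module; assumes
    # configured AWS credentials and the boto.beanstalk helper module):
    #
    #     import boto.beanstalk
    #     conn = boto.beanstalk.connect_to_region('us-east-1')
    #     conn.check_dns_availability('my-cname-prefix')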
def create_application(self, application_name, description=None):
"""
Creates an application that has one configuration template
named default and no application versions.
:type application_name: string
:param application_name: The name of the application.
Constraint: This name must be unique within your account. If the
specified name already exists, the action returns an
InvalidParameterValue error.
:type description: string
:param description: Describes the application.
:raises: TooManyApplicationsException
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('CreateApplication', params)
def create_application_version(self, application_name, version_label,
description=None, s3_bucket=None,
s3_key=None, auto_create_application=None):
"""Creates an application version for the specified application.
:type application_name: string
:param application_name: The name of the application. If no
application is found with this name, and AutoCreateApplication is
false, returns an InvalidParameterValue error.
:type version_label: string
:param version_label: A label identifying this version. Constraint:
Must be unique per application. If an application version already
exists with this label for the specified application, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
:param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
:param s3_key: The Amazon S3 key where the data is located. Both
s3_bucket and s3_key must be specified in order to use a specific
source bundle. If both of these values are not specified the
sample application will be used.
:type auto_create_application: boolean
:param auto_create_application: Determines how the system behaves if
the specified application for this version does not already exist:
true: Automatically creates the specified application for this
version if it does not already exist. false: Returns an
InvalidParameterValue if the specified application for this version
does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
InsufficientPrivilegesException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
if s3_bucket and s3_key:
params['SourceBundle.S3Bucket'] = s3_bucket
params['SourceBundle.S3Key'] = s3_key
if auto_create_application:
params['AutoCreateApplication'] = self._encode_bool(
auto_create_application)
return self._get_response('CreateApplicationVersion', params)
def create_configuration_template(self, application_name, template_name,
solution_stack_name=None,
source_configuration_application_name=None,
source_configuration_template_name=None,
environment_id=None, description=None,
option_settings=None):
"""Creates a configuration template.
Templates are associated with a specific application and are used to
deploy different versions of the application with the same
configuration settings.
:type application_name: string
:param application_name: The name of the application to associate with
this configuration template. If no application is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template.
Constraint: This name must be unique per application. Default: If
a configuration template already exists with this name, AWS Elastic
Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack used by this
configuration. The solution stack specifies the operating system,
architecture, and application server for a configuration template.
It determines the set of configuration options as well as the
possible and default values. Use ListAvailableSolutionStacks to
obtain a list of available solution stacks. Default: If the
SolutionStackName is not specified and the source configuration
parameter is blank, AWS Elastic Beanstalk uses the default solution
stack. If not specified and the source configuration parameter is
specified, AWS Elastic Beanstalk uses the same solution stack as
the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration option to the requested value. The new
value overrides the value obtained from the solution stack or the
source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if source_configuration_application_name:
params['SourceConfiguration.ApplicationName'] = source_configuration_application_name
if source_configuration_template_name:
params['SourceConfiguration.TemplateName'] = source_configuration_template_name
if environment_id:
params['EnvironmentId'] = environment_id
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
return self._get_response('CreateConfigurationTemplate', params)
def create_environment(self, application_name, environment_name,
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
:param application_name: The name of the application that contains the
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
characters in length. The name can contain only letters, numbers,
and hyphens. It cannot start or end with a hyphen. This name must
be unique in your account. If the specified name already exists,
AWS Elastic Beanstalk returns an InvalidParameterValue error.
Default: If the CNAME parameter is not specified, the environment
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
Condition: You must specify either this parameter or a
SolutionStackName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type solution_stack_name: string
:param solution_stack_name: This is an alternative to specifying a
configuration name. If specified, AWS Elastic Beanstalk sets the
configuration values to the default values associated with the
specified solution stack. Condition: You must specify either this
or a TemplateName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type cname_prefix: string
:param cname_prefix: If specified, the environment attempts to use this
value as the prefix for the CNAME. If not specified, the
environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk sets the
specified configuration options to the requested value in the
configuration set for the new environment. These override the
values obtained from the solution stack or the configuration
template. Each element in the list is a tuple of (Namespace,
OptionName, Value), for example::
[('aws:autoscaling:launchconfiguration',
'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if cname_prefix:
params['CNAMEPrefix'] = cname_prefix
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
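    # Illustrative usage sketch (not part of the original module; the stack
    # name and key pair are placeholders):
    #
    #     conn.create_environment(
    #         'myapp', 'myapp-env',
    #         solution_stack_name='64bit Amazon Linux running Python',
    #         option_settings=[('aws:autoscaling:launchconfiguration',
    #                           'Ec2KeyName', 'mykeypair')])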
def create_storage_location(self):
"""
Creates the Amazon S3 storage location for the account. This
location is used to store user log files.
:raises: TooManyBucketsException,
S3SubscriptionRequiredException,
InsufficientPrivilegesException
"""
return self._get_response('CreateStorageLocation', params={})
def delete_application(self, application_name,
terminate_env_by_force=None):
"""
Deletes the specified application along with all associated
versions and configurations. The application versions will not
be deleted from your Amazon S3 bucket.
:type application_name: string
:param application_name: The name of the application to delete.
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
environments will be terminated before deleting the application.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name}
if terminate_env_by_force:
params['TerminateEnvByForce'] = self._encode_bool(
terminate_env_by_force)
return self._get_response('DeleteApplication', params)
def delete_application_version(self, application_name, version_label,
delete_source_bundle=None):
"""Deletes the specified version from the specified application.
:type application_name: string
:param application_name: The name of the application to delete
releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
associated source bundle from Amazon S3. Valid Values: true |
false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
OperationInProgressException,
S3LocationNotInServiceRegionException
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if delete_source_bundle:
params['DeleteSourceBundle'] = self._encode_bool(
delete_source_bundle)
return self._get_response('DeleteApplicationVersion', params)
def delete_configuration_template(self, application_name, template_name):
"""Deletes the specified configuration template.
:type application_name: string
:param application_name: The name of the application to delete
the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
delete.
:raises: OperationInProgressException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
return self._get_response('DeleteConfigurationTemplate', params)
def delete_environment_configuration(self, application_name,
environment_name):
"""
Deletes the draft configuration associated with the running
environment. Updating a running environment with any
configuration changes creates a draft configuration set. You can
get the draft configuration using DescribeConfigurationSettings
while the update is in progress or if the update fails. The
DeploymentStatus for the draft configuration indicates whether
the deployment is in process or has failed. The draft
configuration remains in existence until it is deleted with this
action.
:type application_name: string
:param application_name: The name of the application the
environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
the draft configuration from.
"""
params = {'ApplicationName': application_name,
'EnvironmentName': environment_name}
return self._get_response('DeleteEnvironmentConfiguration', params)
def describe_application_versions(self, application_name=None,
version_labels=None):
"""Returns descriptions for existing application versions.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include ones that are associated
with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
descriptions to only include ones that have the specified version
labels.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_labels:
self.build_list_params(params, version_labels,
'VersionLabels.member')
return self._get_response('DescribeApplicationVersions', params)
def describe_applications(self, application_names=None):
"""Returns the descriptions of existing applications.
:type application_names: list
:param application_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to only include those with the specified
names.
"""
params = {}
if application_names:
self.build_list_params(params, application_names,
'ApplicationNames.member')
return self._get_response('DescribeApplications', params)
def describe_configuration_options(self, application_name=None,
template_name=None,
environment_name=None,
solution_stack_name=None, options=None):
"""Describes configuration options used in a template or environment.
Describes the configuration options that are used in a
particular configuration template or environment, or that a
specified solution stack defines. The description includes the
        values the options accept, their default values, and an indication of
the required action on a running environment if an option value
is changed.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template or environment. Only needed if you want
to describe the configuration options associated with either the
configuration template or environment.
:type template_name: string
:param template_name: The name of the configuration template whose
configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
the specified options.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
if solution_stack_name:
params['SolutionStackName'] = solution_stack_name
if options:
self.build_list_params(params, options, 'Options.member')
return self._get_response('DescribeConfigurationOptions', params)
def describe_configuration_settings(self, application_name,
template_name=None,
environment_name=None):
"""
Returns a description of the settings for the specified
configuration set, that is, either a configuration template or
the configuration set associated with a running environment.
When describing the settings for the configuration set
associated with a running environment, it is possible to receive
two sets of setting descriptions. One is the deployed
configuration set, and the other is a draft configuration of an
environment that is either in the process of deployment or that
failed to deploy.
:type application_name: string
:param application_name: The application for the environment or
configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
describe. Conditional: You must specify either this parameter or
an EnvironmentName, but not both. If you specify both, AWS Elastic
Beanstalk returns an InvalidParameterCombination error. If you do
not specify either, AWS Elastic Beanstalk returns a
MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to describe.
Condition: You must specify either this or a TemplateName, but not
both. If you specify both, AWS Elastic Beanstalk returns an
InvalidParameterCombination error. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
            Elastic Beanstalk returns a MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
self.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
        :param next_token: Pagination token. If specified, the call returns
            the next batch of event results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
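    # Illustrative pagination sketch (editorial, not part of boto):
    # DescribeEvents hands back a NextToken when more results remain, so
    # callers can loop. The ``layer1`` client instance and the JSON response
    # layout shown are assumptions for illustration only.
    #
    #   events, token = [], None
    #   while True:
    #       resp = layer1.describe_events(application_name='MyApp',
    #                                     severity='WARN', max_records=100,
    #                                     next_token=token)
    #       result = resp['DescribeEventsResponse']['DescribeEventsResult']
    #       events.extend(result['Events'])
    #       token = result.get('NextToken')
    #       if not token:
    #           break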
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RebuildEnvironment', params)
def request_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Initiates a request to compile the specified type of
information of the deployed environment. Setting the InfoType
to tail compiles the last lines from the application server log
files of every Amazon EC2 instance in your environment. Use
RetrieveEnvironmentInfo to access the compiled information.
:type info_type: string
:param info_type: The type of information to request.
:type environment_id: string
:param environment_id: The ID of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
requested data. If no such environment is found,
RequestEnvironmentInfo returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RequestEnvironmentInfo', params)
def restart_app_server(self, environment_id=None, environment_name=None):
"""
Causes the environment to restart the application container
server running on each Amazon EC2 instance.
:type environment_id: string
:param environment_id: The ID of the environment to restart the server
for. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to restart the
server for. Condition: You must specify either this or an
EnvironmentId, or both. If you do not specify either, AWS Elastic
            Beanstalk returns a MissingRequiredParameter error.
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RestartAppServer', params)
def retrieve_environment_info(self, info_type='tail', environment_id=None,
environment_name=None):
"""
Retrieves the compiled information from a RequestEnvironmentInfo
request.
:type info_type: string
:param info_type: The type of information to retrieve.
:type environment_id: string
:param environment_id: The ID of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the data's environment. If no such
environment is found, returns an InvalidParameterValue error.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('RetrieveEnvironmentInfo', params)
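    # Illustrative two-step flow (a sketch, not part of boto): the docstrings
    # above pair RequestEnvironmentInfo with RetrieveEnvironmentInfo, so a
    # caller first asks for the logs to be compiled, then fetches them.
    # ``layer1`` is an assumed client instance.
    #
    #   layer1.request_environment_info(info_type='tail',
    #                                   environment_name='my-env')
    #   # ... allow time for the instances to compile their logs, then:
    #   resp = layer1.retrieve_environment_info(info_type='tail',
    #                                           environment_name='my-env')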
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentId, you must specify the
DestinationEnvironmentId.
:type source_environment_name: string
:param source_environment_name: The name of the source environment.
Condition: You must specify at least the SourceEnvironmentID or the
SourceEnvironmentName. You may also specify both. If you specify
the SourceEnvironmentName, you must specify the
DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentId with
the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
environment. Condition: You must specify at least the
DestinationEnvironmentID or the DestinationEnvironmentName. You may
also specify both. You must specify the SourceEnvironmentName with
the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
params['SourceEnvironmentId'] = source_environment_id
if source_environment_name:
params['SourceEnvironmentName'] = source_environment_name
if destination_environment_id:
params['DestinationEnvironmentId'] = destination_environment_id
if destination_environment_name:
params['DestinationEnvironmentName'] = destination_environment_name
return self._get_response('SwapEnvironmentCNAMEs', params)
def terminate_environment(self, environment_id=None, environment_name=None,
terminate_resources=None):
"""Terminates the specified environment.
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
Condition: You must specify either this or an EnvironmentName, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to terminate.
Condition: You must specify either this or an EnvironmentId, or
both. If you do not specify either, AWS Elastic Beanstalk returns
            a MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
resources should shut down when the environment is terminated:
true: (default) The user AWS resources (for example, the Auto
Scaling group, LoadBalancer, etc.) are terminated along with the
            environment. false: The environment is removed from AWS
            Elastic Beanstalk, but the AWS resources continue to operate.
            For more information, see the AWS Elastic Beanstalk User Guide.
            Default: true. Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if terminate_resources:
params['TerminateResources'] = self._encode_bool(
terminate_resources)
return self._get_response('TerminateEnvironment', params)
def update_application(self, application_name, description=None):
"""
Updates the specified application to have the specified
properties.
:type application_name: string
:param application_name: The name of the application to update.
If no such application is found, UpdateApplication returns an
InvalidParameterValue error.
:type description: string
:param description: A new description for the application. Default: If
not specified, AWS Elastic Beanstalk does not update the
description.
"""
params = {'ApplicationName': application_name}
if description:
params['Description'] = description
return self._get_response('UpdateApplication', params)
def update_application_version(self, application_name, version_label,
description=None):
"""Updates the application version to have the properties.
:type application_name: string
:param application_name: The name of the application associated with
this version. If no application is found with this name,
            UpdateApplicationVersion returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
            application version is found with this label,
            UpdateApplicationVersion returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
"""
params = {'ApplicationName': application_name,
'VersionLabel': version_label}
if description:
params['Description'] = description
return self._get_response('UpdateApplicationVersion', params)
def update_configuration_template(self, application_name, template_name,
description=None, option_settings=None,
options_to_remove=None):
"""
Updates the specified configuration template to have the
specified properties or configuration option values.
:type application_name: string
:param application_name: The name of the application associated with
the configuration template to update. If no application is found
with this name, UpdateConfigurationTemplate returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: The name of the configuration template to update.
If no configuration template is found with this name,
UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
:param option_settings: A list of configuration option settings to
update with the new specified option value.
:type options_to_remove: list
:param options_to_remove: A list of configuration options to remove
from the configuration set. Constraint: You can remove only
UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name,
'TemplateName': template_name}
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
return self._get_response('UpdateConfigurationTemplate', params)
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
configuration template, or updates select configuration option
values in the running environment. Attempting to update both
the release and configuration is not allowed and AWS Elastic
Beanstalk returns an InvalidParameterCombination error. When
updating the configuration settings to a new template or
individual settings, a draft configuration is created and
DescribeConfigurationSettings for this environment returns two
setting descriptions with different DeploymentStatus values.
:type environment_id: string
:param environment_id: The ID of the environment to update. If no
environment with this ID exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentName, or both. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to update. If no
environment with this name exists, AWS Elastic Beanstalk returns an
InvalidParameterValue error. Condition: You must specify either
this or an EnvironmentId, or both. If you do not specify either,
            AWS Elastic Beanstalk returns a MissingRequiredParameter error.
:type version_label: string
:param version_label: If this parameter is specified, AWS Elastic
Beanstalk deploys the named application version to the environment.
If no such application version is found, returns an
InvalidParameterValue error.
:type template_name: string
:param template_name: If this parameter is specified, AWS Elastic
Beanstalk deploys this configuration template to the environment.
If no such configuration template is found, AWS Elastic Beanstalk
returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
Beanstalk updates the description of this environment.
:type option_settings: list
:param option_settings: If specified, AWS Elastic Beanstalk updates the
configuration set associated with the running environment and sets
the specified configuration options to the requested value.
:type options_to_remove: list
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
            The ``tier_name`` and ``tier_type`` parameters are related,
            and the values provided must be a valid combination.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
        :param tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if description:
params['Description'] = description
if option_settings:
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.Name'] = tier_name
params['Tier.Type'] = tier_type
params['Tier.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
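    # Illustrative call (a sketch, not part of boto) using the valid tier
    # combination documented above to move an environment to the worker
    # tier; ``layer1`` and the environment name are assumptions.
    #
    #   layer1.update_environment(environment_name='my-worker-env',
    #                             tier_name='Worker', tier_type='SQS/HTTP',
    #                             tier_version='1.0')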
def validate_configuration_settings(self, application_name,
option_settings, template_name=None,
environment_name=None):
"""
Takes a set of configuration settings and either a
configuration template or environment, and determines whether
those values are valid. This action returns a list of messages
indicating any errors or warnings associated with the selection
of option values.
:type application_name: string
:param application_name: The name of the application that the
configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
validate the settings against. Condition: You cannot specify both
this and an environment name.
:type environment_name: string
:param environment_name: The name of the environment to validate the
settings against. Condition: You cannot specify both this and a
configuration template name.
:type option_settings: list
:param option_settings: A list of the options and desired values to
evaluate.
:raises: InsufficientPrivilegesException
"""
params = {'ApplicationName': application_name}
self._build_list_params(params, option_settings,
'OptionSettings.member',
('Namespace', 'OptionName', 'Value'))
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('ValidateConfigurationSettings', params)
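    # Illustrative option_settings value (a sketch): each entry is a
    # (Namespace, OptionName, Value) tuple, matching the tuple_names that
    # _build_list_params below expands. The namespace and option are real
    # Elastic Beanstalk names, but the call itself is an assumption.
    #
    #   layer1.validate_configuration_settings(
    #       'MyApp',
    #       [('aws:autoscaling:asg', 'MinSize', '1')],
    #       environment_name='my-env')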
def _build_list_params(self, params, user_values, prefix, tuple_names):
# For params such as the ConfigurationOptionSettings,
# they can specify a list of tuples where each tuple maps to a specific
# arg. For example:
        # user_values = [('foo', 'bar', 'baz')]
        # prefix='MyOption.member'
# tuple_names=('One', 'Two', 'Three')
# would result in:
# MyOption.member.1.One = foo
# MyOption.member.1.Two = bar
# MyOption.member.1.Three = baz
for i, user_value in enumerate(user_values, 1):
current_prefix = '%s.%s' % (prefix, i)
for key, value in zip(tuple_names, user_value):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
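    # Standalone sketch (editorial, not part of boto) of the flattening that
    # _build_list_params performs, for quick verification against the
    # comment above:
    #
    #   params = {}
    #   for i, value in enumerate([('foo', 'bar', 'baz')], 1):
    #       for key, v in zip(('One', 'Two', 'Three'), value):
    #           params['MyOption.member.%s.%s' % (i, key)] = v
    #   # params == {'MyOption.member.1.One': 'foo',
    #   #            'MyOption.member.1.Two': 'bar',
    #   #            'MyOption.member.1.Three': 'baz'}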
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/beanstalk/layer1.py | Python | gpl-3.0 | 56,243 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Libraries to build Recurrent Neural Networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| ivano666/tensorflow | tensorflow/models/rnn/__init__.py | Python | apache-2.0 | 839 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.compute import nova
from cinder import context
from cinder import test
class FakeNovaClient(object):
class Volumes(object):
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
def create_volume_snapshot(self, *args, **kwargs):
pass
def delete_volume_snapshot(self, *args, **kwargs):
pass
class NovaApiTestCase(test.TestCase):
def setUp(self):
super(NovaApiTestCase, self).setUp()
self.api = nova.API()
self.novaclient = FakeNovaClient()
self.ctx = context.get_admin_context()
self.mox.StubOutWithMock(nova, 'novaclient')
def test_update_server_volume(self):
nova.novaclient(self.ctx).AndReturn(self.novaclient)
self.mox.StubOutWithMock(self.novaclient.volumes,
'update_server_volume')
self.novaclient.volumes.update_server_volume('server_id', 'attach_id',
'new_volume_id')
self.mox.ReplayAll()
self.api.update_server_volume(self.ctx, 'server_id', 'attach_id',
'new_volume_id')
| Thingee/cinder | cinder/tests/compute/test_nova.py | Python | apache-2.0 | 1,814 |
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python3 -m scripts.create_fakes --user [email protected]
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
To create a project with a complex component structure, pass in a list representing the depth you would
like each component to contain.
Examples:
python3 -m scripts.create_fakes -u fred@cos --components '[1, 1, 1, 1]' --nprojects 1
...will create a project with 4 components.
python3 -m scripts.create_fakes -u fred@cos --components '4' --nprojects 1
...will create a project with 4 sibling components (the integer form behaves like '[1, 1, 1, 1]').
python3 -m scripts.create_fakes -u fred@cos --components '[1, [1, 1]]' --nprojects 1
...will create a project with one top-level component that itself contains two components (a depth of 2).
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True
...will create 3 preprints with the default provider osf
python3 -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True --preprintprovider osf,test_provider
...will create 3 preprints with the providers osf and test_provider
"""
from __future__ import print_function, absolute_import
import ast
import sys
import mock
import argparse
import logging
import django
import pytz
from faker import Factory
from faker.providers import BaseProvider
django.setup()
from framework.auth import Auth
from osf_tests.factories import UserFactory, ProjectFactory, NodeFactory, RegistrationFactory, PreprintFactory, PreprintProviderFactory, fake_email
from osf import models
from website.app import init_app
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
def science_word(cls):
"""
:example 'Lorem'
"""
return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
        :param variable_nb_words set to false if you want exactly nb_words returned,
            otherwise nb_words may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return ' '.join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
        :param variable_nb_sentences set to false if you want exactly nb_sentences returned,
            otherwise nb_sentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return ' '.join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
        :return list
"""
return [cls.science_paragraph() for _ in range(0, nb)]
def science_text(cls, max_nb_chars=200):
"""
Generate a text string.
        Depending on max_nb_chars, returns a string made of words, sentences, or paragraphs.
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param max_nb_chars Maximum number of characters the text should contain (minimum 5)
:return string
"""
text = []
if max_nb_chars < 5:
raise ValueError('text() can only generate text of at least 5 characters')
if max_nb_chars < 25:
# join words
while not text:
size = 0
                # determine how many words are needed to reach max_nb_chars once
while size < max_nb_chars:
word = (' ' if size else '') + cls.science_word()
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += '.'
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
                # determine how many sentences are needed to reach max_nb_chars once
while size < max_nb_chars:
sentence = (' ' if size else '') + cls.science_sentence()
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
                # determine how many paragraphs are needed to reach max_nb_chars once
while size < max_nb_chars:
paragraph = ('\n' if size else '') + cls.science_paragraph()
text.append(paragraph)
size += len(paragraph)
text.pop()
return ''.join(text)
logger = logging.getLogger('create_fakes')
SILENT_LOGGERS = [
'factory',
'website.mails',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
fake = Factory.create()
fake.add_provider(Sciencer)
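# Illustrative outputs (hypothetical, since Faker draws at random): once the
# provider is registered, the Sciencer methods are available on the shared
# ``fake`` instance.
#
#   fake.science_word()                # e.g. 'osmosis'
#   fake.science_sentence(nb_words=4)  # e.g. 'Biome equilibrium seepage fauna.'
#   fake.science_text(max_nb_chars=50) # one or more science sentences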
def create_fake_user():
email = fake_email()
name = fake.name()
user = UserFactory(username=email, fullname=name,
is_registered=True, emails=[email],
date_registered=fake.date_time(tzinfo=pytz.UTC),
)
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
return user
def parse_args():
parser = argparse.ArgumentParser(description='Create fake data.')
parser.add_argument('-u', '--user', dest='user', required=True)
parser.add_argument('--nusers', dest='n_users', type=int, default=3)
parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
parser.add_argument('-c', '--components', dest='n_components', type=evaluate_argument, default='0')
parser.add_argument('-p', '--privacy', dest='privacy', type=str, default='private', choices=['public', 'private'])
parser.add_argument('-n', '--name', dest='name', type=str, default=None)
parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
parser.add_argument('--presentation', dest='presentation_name', type=str, default=None)
parser.add_argument('-r', '--registration', dest='is_registration', type=bool, default=False)
parser.add_argument('-pre', '--preprint', dest='is_preprint', type=bool, default=False)
parser.add_argument('-preprovider', '--preprintprovider', dest='preprint_provider', type=str, default=None)
return parser.parse_args()
def evaluate_argument(string):
return ast.literal_eval(string)
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name, is_registration, is_preprint, preprint_provider):
auth = Auth(user=creator)
project_title = name if name else fake.science_sentence()
if is_preprint:
provider = None
if preprint_provider:
try:
                provider = models.PreprintProvider.objects.get(_id=preprint_provider)
except models.PreprintProvider.DoesNotExist:
pass
if not provider:
provider = PreprintProviderFactory(name=fake.science_word())
privacy = 'public'
mock_change_identifier_preprints = mock.patch('website.identifiers.client.CrossRefClient.update_identifier')
mock_change_identifier_preprints.start()
project = PreprintFactory(title=project_title, description=fake.science_paragraph(), creator=creator, provider=provider)
node = project.node
elif is_registration:
project = RegistrationFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
else:
project = ProjectFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
node.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
node.add_contributor(contrib, auth=auth)
if isinstance(n_components, int):
for _ in range(n_components):
NodeFactory(parent=node, title=fake.science_sentence(), description=fake.science_paragraph(),
creator=creator)
elif isinstance(n_components, list):
render_generations_from_node_structure_list(node, creator, n_components)
for _ in range(n_tags):
node.add_tag(fake.science_word(), auth=auth)
if presentation_name is not None:
node.add_tag(presentation_name, auth=auth)
node.add_tag('poster', auth=auth)
node.save()
project.save()
logger.info('Created project: {0}'.format(node.title))
return project
def render_generations_from_parent(parent, creator, num_generations):
current_gen = parent
for generation in range(0, num_generations):
next_gen = NodeFactory(
parent=current_gen,
creator=creator,
title=fake.science_sentence(),
description=fake.science_paragraph()
)
current_gen = next_gen
return current_gen
def render_generations_from_node_structure_list(parent, creator, node_structure_list):
new_parent = None
for node_number in node_structure_list:
if isinstance(node_number, list):
render_generations_from_node_structure_list(new_parent or parent, creator, node_number)
else:
new_parent = render_generations_from_parent(parent, creator, node_number)
return new_parent
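# Worked trace (an editorial sketch, not executed by this script) of the two
# functions above for the structure list [1, [1, 1]] under a project P:
#
#   1      -> render_generations_from_parent(P, creator, 1)
#             creates component A under P; new_parent = A
#   [1, 1] -> recurses with parent A, creating components B and C under A
#
# so P gains one top-level component A, which in turn contains B and C.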
def main():
args = parse_args()
creator = models.OSFUser.objects.get(username=args.user)
for i in range(args.n_projects):
name = args.name + str(i) if args.name else ''
create_fake_project(creator, args.n_users, args.privacy, args.n_components, name, args.n_tags,
args.presentation_name, args.is_registration, args.is_preprint, args.preprint_provider)
print('Created {n} fake projects.'.format(n=args.n_projects))
sys.exit(0)
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
main()
| Johnetordoff/osf.io | scripts/create_fakes.py | Python | apache-2.0 | 19,763 |
import jinja2
from django_jinja import library
from kitsune.sumo import parser
from kitsune.wiki.diff import BetterHtmlDiff
@library.global_function
def diff_table(content_from, content_to):
"""Creates an HTML diff of the passed in content_from and content_to."""
html_diff = BetterHtmlDiff()
diff = html_diff.make_table(content_from.splitlines(), content_to.splitlines(), context=True)
return jinja2.Markup(diff)
@library.global_function
def generate_video(v):
return jinja2.Markup(parser.generate_video(v))
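# Illustrative template usage (a sketch; the variable names are assumptions,
# not taken from kitsune's templates):
#
#   {{ diff_table(revision_from.content, revision_to.content) }}
#   {{ generate_video(video) }}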
| mythmon/kitsune | kitsune/wiki/templatetags/jinja_helpers.py | Python | bsd-3-clause | 534 |
from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
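    # Illustrative results of the parser above (inputs are hypothetical;
    # real compiler banners vary by distribution):
    #
    #   gnu_version_match('4.8.2')
    #       -> ('gfortran', '4.8.2')   # short -dumpversion output
    #   gnu_version_match('GNU Fortran (GCC) 3.4.6')
    #       -> ('g77', '3.4.6')        # long g77 banner
    #   gnu_version_match('GNU Fortran 95 (GCC) 4.0.3')
    #       -> ('gfortran', '4.0.3')   # old gfortran '95' banner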
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let disutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
try:
get_makefile_filename = sc.get_makefile_filename
except AttributeError:
pass # i.e. PyPy
else:
filename = get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s, stacklevel=2)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
sep = ',' if sys.platform == 'darwin' else '='
return '-Wl,-rpath%s"%s"' % (sep, dir)
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
os.close(fid)
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
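# Illustrative check (a sketch; assumes a Fortran compiler on PATH and only
# succeeds with toolchains that accept Apple's -arch flag, i.e. on OS X):
#
#   _can_target(['gfortran'], 'x86_64')   # True if '-arch x86_64' compiles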
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
| maniteja123/numpy | numpy/distutils/fcompiler/gnu.py | Python | bsd-3-clause | 14,957 |
"""
Create movie from MEG inverse solution
=======================================
Data were computed using mne-python (http://martinos.org/mne)
"""
import os
import numpy as np
from surfer import Brain
from surfer.io import read_stc
print(__doc__)
"""
create Brain object for visualization
"""
brain = Brain('fsaverage', 'split', 'inflated', size=(800, 400))
"""
read and display MNE dSPM inverse solution
"""
stc_fname = os.path.join('example_data', 'meg_source_estimate-%s.stc')
for hemi in ['lh', 'rh']:
stc = read_stc(stc_fname % hemi)
data = stc['data']
times = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
brain.add_data(data, colormap='hot', vertices=stc['vertices'],
smoothing_steps=10, time=times, hemi=hemi,
time_label=lambda t: '%s ms' % int(round(t * 1e3)))
"""
scale colormap
"""
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
"""
Save a movie. Use a large value for time_dilation because the sample stc only
covers 30 ms.
"""
brain.save_movie('example_current.mov', time_dilation=30)
brain.close()
| diego0020/PySurfer | examples/save_movie.py | Python | bsd-3-clause | 1,110 |
# $Id$
"""Mixins that are useful for classes using vtk_kit.
@author: Charl P. Botha <http://cpbotha.net/>
"""
from external.vtkPipeline.ConfigVtkObj import ConfigVtkObj
from external.vtkPipeline.vtkMethodParser import VtkMethodParser
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin # temporary
import module_utils # temporary, most of this should be in utils.
import re
import types
import utils
#########################################################################
class PickleVTKObjectsModuleMixin(object):
"""This mixin will pickle the state of all vtk objects whose binding
attribute names have been added to self._vtkObjects, e.g. if you have
a self._imageMath, '_imageMath' should be in the list.
Your module has to derive from module_base as well so that it has a
self._config!
Remember to call the __init__ of this class with the list of attribute
strings representing vtk objects that you want pickled. All the objects
have to exist and be initially configured by then.
Remember to call close() when your child class close()s.
"""
def __init__(self, vtkObjectNames):
# you have to add the NAMES of the objects that you want pickled
# to this list.
self._vtkObjectNames = vtkObjectNames
self.statePattern = re.compile ("To[A-Z0-9]")
# make sure that the state of the vtkObjectNames objects is
# encapsulated in the initial _config
self.logic_to_config()
def close(self):
# make sure we get rid of these bindings as well
del self._vtkObjectNames
def logic_to_config(self):
parser = VtkMethodParser()
for vtkObjName in self._vtkObjectNames:
# pickled data: a list with toggle_methods, state_methods and
# get_set_methods as returned by the vtkMethodParser. Each of
# these is a list of tuples with the name of the method (as
# returned by the vtkMethodParser) and the value; in the case
# of the stateMethods, we use the whole stateGroup instead of
# just a single name
vtkObjPD = [[], [], []]
vtkObj = getattr(self, vtkObjName)
parser.parse_methods(vtkObj)
# parser now has toggle_methods(), state_methods() and
# get_set_methods();
# toggle_methods: ['BlaatOn', 'AbortExecuteOn']
# state_methods: [['SetBlaatToOne', 'SetBlaatToTwo'],
# ['SetMaatToThree', 'SetMaatToFive']]
# get_set_methods: ['NumberOfThreads', 'Progress']
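            # Putting the three lists together, a pickled vtkObjPD matching
            # the parser examples above might look like (illustrative values):
            #   [[('BlaatOn', 1), ('AbortExecuteOn', 0)],
            #    [(['SetBlaatToOne', 'SetBlaatToTwo'], 1)],
            #    [('NumberOfThreads', 4), ('Progress', 0.0)]]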
for method in parser.toggle_methods():
# if you query ReleaseDataFlag on a filter with 0 outputs,
# VTK yields an error
if vtkObj.GetNumberOfOutputPorts() == 0 and \
method == 'ReleaseDataFlagOn':
continue
# we need to snip the 'On' off
val = eval("vtkObj.Get%s()" % (method[:-2],))
vtkObjPD[0].append((method, val))
for stateGroup in parser.state_methods():
# we search up to the To
end = self.statePattern.search (stateGroup[0]).start ()
# so we turn SetBlaatToOne to GetBlaat
get_m = 'G'+stateGroup[0][1:end]
# we're going to have to be more clever when we set_config...
# use a similar trick to get_state in vtkMethodParser
val = eval('vtkObj.%s()' % (get_m,))
vtkObjPD[1].append((stateGroup, val))
for method in parser.get_set_methods():
val = eval('vtkObj.Get%s()' % (method,))
vtkObjPD[2].append((method, val))
# finally set the pickle data in the correct position
setattr(self._config, vtkObjName, vtkObjPD)
def config_to_logic(self):
# go through at least the attributes in self._vtkObjectNames
for vtkObjName in self._vtkObjectNames:
try:
vtkObjPD = getattr(self._config, vtkObjName)
vtkObj = getattr(self, vtkObjName)
except AttributeError:
print "PickleVTKObjectsModuleMixin: %s not available " \
"in self._config OR in self. Skipping." % (vtkObjName,)
else:
for method, val in vtkObjPD[0]:
if val:
eval('vtkObj.%s()' % (method,))
else:
# snip off the On
eval('vtkObj.%sOff()' % (method[:-2],))
for stateGroup, val in vtkObjPD[1]:
# keep on calling the methods in stategroup until
# the getter returns a value == val.
end = self.statePattern.search(stateGroup[0]).start()
getMethod = 'G'+stateGroup[0][1:end]
for i in range(len(stateGroup)):
m = stateGroup[i]
eval('vtkObj.%s()' % (m,))
tempVal = eval('vtkObj.%s()' % (getMethod,))
if tempVal == val:
# success! break out of the for loop
break
for method, val in vtkObjPD[2]:
try:
eval('vtkObj.Set%s(val)' % (method,))
except TypeError:
if type(val) in [types.TupleType, types.ListType]:
# sometimes VTK wants the separate elements
# and not the tuple / list
eval("vtkObj.Set%s(*val)"%(method,))
else:
# re-raise the exception if it wasn't a
# tuple/list
raise
#########################################################################
# note that the pickle mixin comes first, as its config_to_logic/logic_to_config
# should be chosen over that of noConfig
class SimpleVTKClassModuleBase(PickleVTKObjectsModuleMixin,
IntrospectModuleMixin,
ModuleBase):
"""Use this base to make a DeVIDE module that wraps a single VTK
object. The state of the VTK object will be saved when the network
is.
You only have to override the __init__ method and call the __init__
of this class with the desired parameters.
The __doc__ string of your module class will be replaced with the
__doc__ string of the encapsulated VTK class (and will thus be
shown if the user requests module help). If you don't want this,
call the ctor with replaceDoc=False.
inputFunctions is a list of the complete methods that have to be called
on the encapsulated VTK class, e.g. ['SetInput1(inputStream)',
'SetInput1(inputStream)']. The same goes for outputFunctions, except that
there's no inputStream involved. Use None in both cases if you want
the default to be used (SetInput(), GetOutput()).
"""
def __init__(self, module_manager, vtkObjectBinding, progressText,
inputDescriptions, outputDescriptions,
replaceDoc=True,
inputFunctions=None, outputFunctions=None):
self._viewFrame = None
self._configVtkObj = None
# first these two mixins
ModuleBase.__init__(self, module_manager)
self._theFilter = vtkObjectBinding
if replaceDoc:
myMessage = "<em>"\
"This is a special DeVIDE module that very simply " \
"wraps a single VTK class. In general, the " \
"complete state of the class will be saved along " \
"with the rest of the network. The documentation " \
"below is that of the wrapped VTK class:</em>"
self.__doc__ = '%s\n\n%s' % (myMessage, self._theFilter.__doc__)
# now that we have the object, init the pickle mixin so
# that the state of this object will be saved
PickleVTKObjectsModuleMixin.__init__(self, ['_theFilter'])
# make progress hooks for the object
module_utils.setup_vtk_object_progress(self, self._theFilter,
progressText)
self._inputDescriptions = inputDescriptions
self._outputDescriptions = outputDescriptions
self._inputFunctions = inputFunctions
self._outputFunctions = outputFunctions
def _createViewFrame(self):
parentWindow = self._module_manager.get_module_view_parent_window()
import resources.python.defaultModuleViewFrame
reload(resources.python.defaultModuleViewFrame)
dMVF = resources.python.defaultModuleViewFrame.defaultModuleViewFrame
viewFrame = module_utils.instantiate_module_view_frame(
self, self._module_manager, dMVF)
# ConfigVtkObj parent not important, we're passing frame + panel
# this should populate the sizer with a new sizer7
# params: noParent, noRenwin, vtk_obj, frame, panel
self._configVtkObj = ConfigVtkObj(None, None,
self._theFilter,
viewFrame, viewFrame.viewFramePanel)
module_utils.create_standard_object_introspection(
self, viewFrame, viewFrame.viewFramePanel,
{'Module (self)' : self}, None)
# we don't want the Execute button to be default... else stuff gets
# executed with every enter in the command window (at least in Doze)
module_utils.create_eoca_buttons(self, viewFrame,
viewFrame.viewFramePanel,
False)
self._viewFrame = viewFrame
return viewFrame
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
PickleVTKObjectsModuleMixin.close(self)
IntrospectModuleMixin.close(self)
if self._viewFrame is not None:
self._configVtkObj.close()
self._viewFrame.Destroy()
ModuleBase.close(self)
# get rid of our binding to the vtkObject
del self._theFilter
def get_output_descriptions(self):
return self._outputDescriptions
def get_output(self, idx):
        # this will only ever be invoked if your get_output_descriptions has
# 1 or more elements
if self._outputFunctions:
return eval('self._theFilter.%s' % (self._outputFunctions[idx],))
else:
return self._theFilter.GetOutput()
def get_input_descriptions(self):
return self._inputDescriptions
def set_input(self, idx, inputStream):
# this will only be called for a certain idx if you've specified that
# many elements in your get_input_descriptions
if self._inputFunctions:
exec('self._theFilter.%s' %
(self._inputFunctions[idx]))
else:
if idx == 0:
self._theFilter.SetInput(inputStream)
else:
self._theFilter.SetInput(idx, inputStream)
def execute_module(self):
# it could be a writer, in that case, call the Write method.
if hasattr(self._theFilter, 'Write') and \
callable(self._theFilter.Write):
self._theFilter.Write()
else:
self._theFilter.Update()
def streaming_execute_module(self):
"""All VTK classes should be streamable.
"""
# it could be a writer, in that case, call the Write method.
if hasattr(self._theFilter, 'Write') and \
callable(self._theFilter.Write):
self._theFilter.Write()
else:
self._theFilter.Update()
def view(self):
if self._viewFrame is None:
# we have an initial config populated with stuff and in sync
# with theFilter. The viewFrame will also be in sync with the
# filter
self._viewFrame = self._createViewFrame()
self._viewFrame.Show(True)
self._viewFrame.Raise()
def config_to_view(self):
# the pickleVTKObjectsModuleMixin does logic <-> config
# so when the user clicks "sync", logic_to_config is called
# which transfers picklable state from the LOGIC to the CONFIG
# then we do double the work and call update_gui, which transfers
# the same state from the LOGIC straight up to the VIEW
self._configVtkObj.update_gui()
def view_to_config(self):
# same thing here: user clicks "apply", view_to_config is called which
# zaps UI changes straight to the LOGIC. Then we have to call
# logic_to_config explicitly which brings the info back up to the
# config... i.e. view -> logic -> config
# after that, config_to_logic is called which transfers all state AGAIN
# from the config to the logic
self._configVtkObj.apply_changes()
self.logic_to_config()
#########################################################################
| nagyistoce/devide | module_kits/vtk_kit/mixins.py | Python | bsd-3-clause | 13,612 |
#!/usr/bin/python
#
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
requirements: [ boto3 ]
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used.
required: false
default: memcached
choices: ['redis', 'memcached']
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: None
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
    default: cache.t2.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have. Required when state=present.
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: None
cache_parameter_group:
description:
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: false
default: None
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: None
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: None
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
"""
from time import sleep
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
try:
import boto3
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine.lower()
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
kwargs = dict(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeType=self.node_type,
Engine=self.engine,
EngineVersion=self.cache_engine_version,
CacheSecurityGroupNames=self.cache_security_groups,
SecurityGroupIds=self.security_group_ids,
CacheParameterGroupName=self.cache_parameter_group,
CacheSubnetGroupName=self.cache_subnet_group)
if self.cache_port is not None:
kwargs['Port'] = self.cache_port
if self.zone is not None:
kwargs['PreferredAvailabilityZone'] = self.zone
try:
self.conn.create_cache_cluster(**kwargs)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
                # Cluster can only be synced when it is available. If we
                # can't wait for that, there is nothing more we can do here.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
self.conn.modify_cache_cluster(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeIdsToRemove=nodes_to_remove,
CacheSecurityGroupNames=self.cache_security_groups,
CacheParameterGroupName=self.cache_parameter_group,
SecurityGroupIds=self.security_group_ids,
ApplyImmediately=True,
EngineVersion=self.cache_engine_version)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
self.conn.reboot_cache_cluster(CacheClusterId=self.name,
CacheNodeIdsToReboot=cache_node_ids)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
        if self.status not in status_map:
            msg = "Cannot wait: no known transition from status '%s'"
            self.module.fail_json(msg=msg % self.status)
        if status_map[self.status] != awaited_status:
            msg = "Invalid awaited status. '%s' cannot transition to '%s'"
            self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
while True:
sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
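        # Transition sketch (assumed statuses): waiting on a 'creating'
        # cluster until it is 'available' is fine; asking to go from
        # 'deleting' to 'available' fails fast via the checks above.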
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.items():
if value is not None and self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) != set(self.cache_security_groups):
return True
# check vpc security groups
if self.security_group_ids:
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) != set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.items():
if getattr(self, key) is not None and getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True)
if region:
return boto3_conn(self.module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_params)
else:
self.module.fail_json(msg="region must be specified")
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
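        # Assumed response shapes (abridged): memcached exposes one shared
        # endpoint, e.g. data['ConfigurationEndpoint']['Port'] == 11211,
        # while redis carries per-node endpoints under data['CacheNodes'].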
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
self.data = None
self.status = 'gone'
return
else:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return []
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
return cache_node_ids[-num_nodes_to_remove:]
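        # Worked example (hypothetical ids): scaling 5 nodes down to 3 gives
        # num_nodes_to_remove == 2, and the slice picks the last two entries,
        # e.g. ['0004', '0005'], i.e. the most recently added nodes go first.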
def main():
""" elasticache ansible module """
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'rebooted']),
name=dict(required=True),
engine=dict(default='memcached'),
cache_engine_version=dict(default=""),
node_type=dict(default='cache.t2.small'),
num_nodes=dict(default=1, type='int'),
# alias for compat with the original PR 1950
cache_parameter_group=dict(default="", aliases=['parameter_group']),
cache_port=dict(type='int'),
cache_subnet_group=dict(default=""),
cache_security_groups=dict(default=[], type='list'),
security_group_ids=dict(default=[], type='list'),
zone=dict(),
wait=dict(default=True, type='bool'),
hard_modify=dict(type='bool')
))
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_subnet_group = module.params['cache_subnet_group']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
cache_parameter_group = module.params['cache_parameter_group']
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
cache_parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
if __name__ == '__main__':
main()
| wrouesnel/ansible | lib/ansible/modules/cloud/amazon/elasticache.py | Python | gpl-3.0 | 20,816 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012+ BREMSKERL-REIBBELAGWERKE EMMERLING GmbH & Co. KG
# Author Marco Dieckhoff
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Stock Move Backdating",
"version": "1.0",
'author': ['Marco Dieckhoff, BREMSKERL', 'Agile Business Group'],
"category": "Stock Logistics",
'website': 'www.bremskerl.com',
"depends": ["stock"],
"summary": "Allows back-dating of stock moves",
"description": """This module allows to register old stock moves
(with date != now).
On stock moves, user can specify the "Actual Movement Date", that will be
used as movement date""",
'data': [
"view/stock_view.xml",
"wizard/stock_partial_picking_view.xml",
],
'demo': [],
'installable': False,
}
| yvaucher/stock-logistics-workflow | __unported__/stock_move_backdating/__openerp__.py | Python | agpl-3.0 | 1,678 |