repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/test/test_threadedtempfile.py | 171 | 2192 |
"""
Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
in each of NUM_THREADS threads, recording the number of successes and
failures. A failure is a bug in tempfile, and may be due to:
+ Trying to create more than one tempfile with the same name.
+ Trying to delete a tempfile that doesn't still exist.
+ Something we've never seen before.
By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50. This is enough to
create about 150 failures per run under Win98SE in 2.0, and runs pretty
quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
provoking a 2.0 failure under Linux.
"""
NUM_THREADS = 20
FILES_PER_THREAD = 50
import tempfile
from test.support import threading_setup, threading_cleanup, run_unittest, import_module
threading = import_module('threading')
import unittest
import io
from traceback import print_exc
startEvent = threading.Event()
class TempFileGreedy(threading.Thread):
    error_count = 0
    ok_count = 0

    def run(self):
        self.errors = io.StringIO()
        startEvent.wait()
        for i in range(FILES_PER_THREAD):
            try:
                f = tempfile.TemporaryFile("w+b")
                f.close()
            except:
                self.error_count += 1
                print_exc(file=self.errors)
            else:
                self.ok_count += 1


class ThreadedTempFileTest(unittest.TestCase):
    def test_main(self):
        threads = []
        thread_info = threading_setup()

        for i in range(NUM_THREADS):
            t = TempFileGreedy()
            threads.append(t)
            t.start()

        startEvent.set()

        ok = 0
        errors = []
        for t in threads:
            t.join()
            ok += t.ok_count
            if t.error_count:
                errors.append(str(t.name) + str(t.errors.getvalue()))

        threading_cleanup(*thread_info)

        msg = "Errors: errors %d ok %d\n%s" % (len(errors), ok,
                                               '\n'.join(errors))
        self.assertEqual(errors, [], msg)
        self.assertEqual(ok, NUM_THREADS * FILES_PER_THREAD)


def test_main():
    run_unittest(ThreadedTempFileTest)


if __name__ == "__main__":
    test_main()
| gpl-2.0 |
eeshangarg/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_twitch.py | 87 | 1050 |
import json
from social.tests.backends.oauth import OAuth2Test
class TwitchOAuth2Test(OAuth2Test):
    backend_path = 'social.backends.twitch.TwitchOAuth2'
    user_data_url = 'https://api.twitch.tv/kraken/user/'
    expected_username = 'test_user1'
    access_token_body = json.dumps({
        'access_token': 'foobar',
    })
    user_data_body = json.dumps({
        'type': 'user',
        'name': 'test_user1',
        'created_at': '2011-06-03T17:49:19Z',
        'updated_at': '2012-06-18T17:19:57Z',
        '_links': {
            'self': 'https://api.twitch.tv/kraken/users/test_user1'
        },
        'logo': 'http://static-cdn.jtvnw.net/jtv_user_pictures/'
                'test_user1-profile_image-62e8318af864d6d7-300x300.jpeg',
        '_id': 22761313,
        'display_name': 'test_user1',
        'email': '[email protected]',
        'partnered': True,
        'bio': 'test bio woo I\'m a test user'
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
| agpl-3.0 |
leafclick/intellij-community | python/testData/inspections/PyTypeCheckerInspection/Generator.py | 30 | 3112 |
def test():
    def gen(n):
        for x in xrange(n):
            yield str(x)

    def f_1(xs):
        """
        :type xs: list of int
        """
        return xs

    def f_2(xs):
        """
        :type xs: collections.Sequence of int
        """
        return xs

    def f_3(xs):
        """
        :type xs: collections.Container of int
        """
        return xs

    def f_4(xs):
        """
        :type xs: collections.Iterator of int
        """
        return xs

    def f_5(xs):
        """
        :type xs: collections.Iterable of int
        """
        return xs

    def f_6(xs):
        """
        :type xs: list
        """
        return xs

    def f_7(xs):
        """
        :type xs: collections.Sequence
        """
        return xs

    def f_8(xs):
        """
        :type xs: collections.Container
        """
        return xs

    def f_9(xs):
        """
        :type xs: collections.Iterator
        """
        return xs

    def f_10(xs):
        """
        :type xs: collections.Iterable
        """
        return xs

    def f_11(xs):
        """
        :type xs: list of string
        """
        return xs

    def f_12(xs):
        """
        :type xs: collections.Sequence of string
        """
        return xs

    def f_13(xs):
        """
        :type xs: collections.Container of string
        """
        return xs

    def f_14(xs):
        """
        :type xs: collections.Iterator of string
        """
        return xs

    def f_15(xs):
        """
        :type xs: collections.Iterable of string
        """
        return xs

    return [
        ''.join(gen(10)),
        f_1(<warning descr="Expected type 'List[int]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_2(<warning descr="Expected type 'Sequence[int]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_3(<warning descr="Expected type 'Container[int]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_4(<warning descr="Expected type 'Iterator[int]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_5(<warning descr="Expected type 'Iterable[int]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_6(<warning descr="Expected type 'list', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_7(<warning descr="Expected type 'Sequence', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_8(<warning descr="Expected type 'Container', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_9(gen(11)),
        f_10(gen(11)),
        f_11(<warning descr="Expected type 'List[Union[str, unicode]]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_12(<warning descr="Expected type 'Sequence[Union[str, unicode]]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_13(<warning descr="Expected type 'Container[Union[str, unicode]]', got 'Generator[str, Any, None]' instead">gen(11)</warning>),
        f_14(gen(11)),
        f_15(gen(11)),
        f_15('foo'.split('o')),
    ]
| apache-2.0 |
sigma/vmw.vco | src/vmw/vco/types.py | 1 | 1824 |
# Copyright (c) 2009-2010 VMware, Inc. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from vmw.vco.generated.VSOWebControlService_types import ns0
from vmw.ZSI.schema import GTD
__schema = ns0.targetNamespace
def __getClass(name):
    return GTD(__schema, name)(name).pyclass
Workflow = __getClass("Workflow")
WorkflowToken = __getClass("WorkflowToken")
WorkflowTokenAttribute = __getClass("WorkflowTokenAttribute")
WorkflowParameter = __getClass("WorkflowParameter")
ArrayOfWorkflowParameter = __getClass("ArrayOfWorkflowParameter")
ModuleInfo = __getClass("ModuleInfo")
FinderResult = __getClass("FinderResult")
ArrayOfFinderResult = __getClass("ArrayOfFinderResult")
Property = __getClass("Property")
ArrayOfProperty = __getClass("ArrayOfProperty")
QueryResult = __getClass("QueryResult")
| mit |
cloudera/zookeeper | src/contrib/zkpython/src/test/close_deadlock_test.py | 164 | 1574 |
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, zktestbase, unittest, threading
import time
class CloseDeadlockTest(zktestbase.TestBase):
    """
    This tests for the issue found in
    https://issues.apache.org/jira/browse/ZOOKEEPER-763

    zookeeper.close blocks on waiting for all completions to
    finish. Previously it was doing so while holding the GIL, stopping
    any completions from actually continuing.

    This test is a failure if it does not exit within a few seconds.
    """

    def deadlock():
        cv = threading.Condition()

        def callback(*args):
            cv.acquire()
            cv.notifyAll()
            cv.release()
            time.sleep(1)

        cv.acquire()
        zookeeper.aget(handle, "/", None, callback)
        cv.wait()
        zookeeper.close(handle)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
smarinac/root | interpreter/llvm/src/utils/DSAclean.py | 147 | 1187 |
#! /usr/bin/python
#changelog:
#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
#nodes such as %tmp.1.i and %tmp._i.3
#10/13/2005: extended to remove variables of the form %tmp(.#)* rather than just
#%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
#the comments
#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
#than removing all lines for which the label CONTAINS %tmp.#

import re
import sys

if( len(sys.argv) < 3 ):
    print 'usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>'
    sys.exit(1)

#get a file object
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')

#we'll get this one line at a time...while we could just put the whole thing in a string
#it would kill old computers
buffer = input.readline()
while buffer != '':
    if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
        #skip next line, write neither this line nor the next
        buffer = input.readline()
    else:
        #this isn't a tmp Node, we can write it
        output.write(buffer)
    #prepare for the next iteration
    buffer = input.readline()

input.close()
output.close()
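
# Added illustration (not part of the original script), using hypothetical
# dot-file lines to show what the filter above does:
#
#   Node1 [label=" %tmp.1.i"];   <- matches the regex: dropped
#   Node1 -> Node2;              <- the line that follows a match: also dropped
#   Node3 [label=" %x.addr"];    <- no %tmp label: copied to the output unchanged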
| lgpl-2.1 |
4Quant/tensorflow | tensorflow/python/ops/seq2seq.py | 2 | 48249 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models in TensorFlow.
Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good
results on a number of tasks, such as speech recognition, parsing, machine
translation, or even constructing automated replies to emails.
Before using this module, it is recommended to read the TensorFlow tutorial
on sequence-to-sequence models. It explains the basic concepts of this module
and shows an end-to-end example of how to build a translation model.
https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html
Here is an overview of functions available in this module. They all use
a very similar interface, so after reading the above tutorial and using
one of them, others should be easy to substitute.
* Full sequence-to-sequence models.
- basic_rnn_seq2seq: The most basic RNN-RNN model.
- tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
- embedding_rnn_seq2seq: The basic model with input embedding.
- embedding_tied_rnn_seq2seq: The tied model with input embedding.
- embedding_attention_seq2seq: Advanced model with input embedding and
the neural attention mechanism; recommended for complex tasks.
* Multi-task sequence-to-sequence models.
- one2many_rnn_seq2seq: The embedding model with multiple decoders.
* Decoders (when you write your own encoder, you can use these to decode;
e.g., if you want to write a model that generates captions for images).
- rnn_decoder: The basic decoder based on a pure RNN.
- attention_decoder: A decoder that uses the attention mechanism.
* Losses.
- sequence_loss: Loss for a sequence model returning average log-perplexity.
- sequence_loss_by_example: As above, but not averaging over all examples.
* model_with_buckets: A convenience function to create models with bucketing
(see the tutorial above for an explanation of why and how to use it).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
def _extract_argmax_and_embed(embedding, output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
if output_projection is not None:
prev = nn_ops.xw_plus_b(
prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev
return loop_function
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with variable_scope.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
def basic_rnn_seq2seq(
encoder_inputs, decoder_inputs, cell, dtype=dtypes.float32, scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector,
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
state: The state of each decoder cell in the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
_, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_state, cell)
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
loop_function=None, dtype=dtypes.float32, scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_state = rnn.rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
variable_scope.get_variable_scope().reuse_variables()
return rnn_decoder(decoder_inputs, enc_state, cell,
loop_function=loop_function, scope=scope)
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
output_projection=None, feed_previous=False,
update_embedding_for_previous=True, scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = ops.convert_to_tensor(
output_projection[0], dtype=dtypes.float32)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = ops.convert_to_tensor(
output_projection[1], dtype=dtypes.float32)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_rnn_decoder"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, cell.input_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = (
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
return rnn_decoder(emb_inp, initial_state, cell,
loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
output_projection=None, feed_previous=False,
dtype=dtypes.float32, scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
cell.input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
_, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(
decoder_inputs, encoder_state, cell, num_decoder_symbols,
output_projection=output_projection, feed_previous=feed_previous)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, cell, num_decoder_symbols,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_symbols, output_projection=None,
feed_previous=False, dtype=dtypes.float32,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x cell.input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_symbols: Integer; number of symbols for both encoder and decoder.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_tied_rnn_seq2seq"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, cell.input_size])
emb_encoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
for x in encoder_inputs]
emb_decoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
for x in decoder_inputs]
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_symbols)
if isinstance(feed_previous, bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, True) if feed_previous else None
return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, False) if feed_previous_bool else None
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
output_size=None, num_heads=1, loop_function=None,
dtype=dtypes.float32, scope=None,
initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, or shapes
of attention_states are not set.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if not attention_states.get_shape()[1:2].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(scope or "attention_decoder"):
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(
attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = variable_scope.get_variable("AttnW_%d" % a,
[1, 1, attn_size, attention_vec_size])
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(variable_scope.get_variable("AttnV_%d" % a,
[attention_vec_size]))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = rnn_cell.linear(query, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(
v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
# Now calculate the attention-weighted vector d.
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,
[1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.pack([batch_size, attn_size])
attns = [array_ops.zeros(batch_attn_size, dtype=dtype)
for _ in xrange(num_heads)]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
if initial_state_attention:
attns = attention(initial_state)
for i, inp in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
# Merge input and previous attentions into one vector of the right size.
x = rnn_cell.linear([inp] + attns, cell.input_size, True)
# Run the RNN.
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=True):
attns = attention(state)
else:
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
output = rnn_cell.linear([cell_output] + attns, output_size, True)
if loop_function is not None:
prev = output
outputs.append(output)
return outputs, state
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
cell, num_symbols, num_heads=1,
output_size=None, output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=dtypes.float32, scope=None,
initial_state_attention=False):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
num_heads: Number of attention heads that read from attention_states.
output_size: Size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_attention_decoder"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, cell.input_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = [
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]
return attention_decoder(
emb_inp, initial_state, attention_states, cell, output_size=output_size,
num_heads=num_heads, loop_function=loop_function,
initial_state_attention=initial_state_attention)
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
num_heads=1, output_projection=None,
feed_previous=False, dtype=dtypes.float32,
scope=None, initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
cell.input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_attention_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
encoder_outputs, encoder_state = rnn.rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])
for e in encoder_outputs]
attention_states = array_ops.concat(1, top_states)
# Decoder.
output_size = None
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs, encoder_state, attention_states, cell,
num_decoder_symbols, num_heads=num_heads, output_size=output_size,
output_projection=output_projection, feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs, encoder_state, attention_states, cell,
num_decoder_symbols, num_heads=num_heads, output_size=output_size,
output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
def one2many_rnn_seq2seq(encoder_inputs, decoder_inputs_dict, cell,
num_encoder_symbols, num_decoder_symbols_dict,
feed_previous=False, dtype=dtypes.float32, scope=None):
"""One-to-many RNN sequence-to-sequence model (multi-task).
This is a multi-task sequence-to-sequence model with one encoder and multiple
decoders. Reference to multi-task sequence-to-sequence learning can be found
here: http://arxiv.org/abs/1511.06114
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs_dict: A dictionary mapping decoder name (string) to
the corresponding decoder_inputs; each decoder_inputs is a list of 1D
Tensors of shape [batch_size]; num_decoders is defined as
len(decoder_inputs_dict).
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an
integer specifying number of symbols for the corresponding decoder;
len(num_decoder_symbols_dict) must be equal to num_decoders.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"one2many_rnn_seq2seq"
Returns:
A tuple of the form (outputs_dict, state_dict), where:
outputs_dict: A mapping from decoder name (string) to a list of the same
length as decoder_inputs_dict[name]; each element in the list is a 2D
Tensors with shape [batch_size x num_decoder_symbol_list[name]]
containing the generated outputs.
state_dict: A mapping from decoder name (string) to the final state of the
corresponding decoder RNN; it is a 2D Tensor of shape
[batch_size x cell.state_size].
"""
outputs_dict = {}
state_dict = {}
with variable_scope.variable_scope(scope or "one2many_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
_, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
for name, decoder_inputs in decoder_inputs_dict.items():
num_decoder_symbols = num_decoder_symbols_dict[name]
with variable_scope.variable_scope("one2many_decoder_" + str(name)):
decoder_cell = rnn_cell.OutputProjectionWrapper(cell,
num_decoder_symbols)
if isinstance(feed_previous, bool):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, decoder_cell, num_decoder_symbols,
feed_previous=feed_previous)
else:
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def filled_embedding_rnn_decoder(feed_previous):
# pylint: disable=cell-var-from-loop
reuse = None if feed_previous else True
vs = variable_scope.get_variable_scope()
with variable_scope.variable_scope(vs, reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, decoder_cell,
num_decoder_symbols, feed_previous=feed_previous)
# pylint: enable=cell-var-from-loop
return outputs + [state]
outputs_and_state = control_flow_ops.cond(
feed_previous,
lambda: filled_embedding_rnn_decoder(True),
lambda: filled_embedding_rnn_decoder(False))
outputs = outputs_and_state[:-1]
state = outputs_and_state[-1]
outputs_dict[name] = outputs
state_dict[name] = state
return outputs_dict, state_dict
def sequence_loss_by_example(logits, targets, weights,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.op_scope(logits + targets + weights, name,
"sequence_loss_by_example"):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
# We need to make target an int64-tensor and set its shape.
target = array_ops.reshape(math_ops.to_int64(target), [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logit, target)
else:
crossent = softmax_loss_function(logit, target)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
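# Added note (not from the original module): with the per-step cross-entropies
# crossent_t and weights w_t computed above, the value returned for each example
# is
#   sum_t(w_t * crossent_t) / (sum_t(w_t) + 1e-12)
# when average_across_timesteps=True (a weighted average log-perplexity);
# with averaging disabled it is simply the weighted sum over time-steps.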
def sequence_loss(logits, targets, weights,
average_across_timesteps=True, average_across_batch=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.op_scope(logits + targets + weights, name, "sequence_loss"):
cost = math_ops.reduce_sum(sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, dtypes.float32)
else:
return cost
def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
buckets, seq2seq, softmax_loss_function=None,
per_example_loss=False, name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
seq2seq: A sequence-to-sequence model function; it takes 2 inputs that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be
a scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
Returns:
A tuple of the form (outputs, losses), where:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors of shape [batch_size x num_decoder_symbols] (jth outputs).
losses: List of scalar Tensors, representing losses for each bucket, or,
if per_example_loss is set, a list of 1D batch-sized float Tensors.
Raises:
ValueError: If length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last"
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last"
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with ops.op_scope(all_inputs, name, "model_with_buckets"):
for j, bucket in enumerate(buckets):
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=True if j > 0 else None):
bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
decoder_inputs[:bucket[1]])
outputs.append(bucket_outputs)
if per_example_loss:
losses.append(sequence_loss_by_example(
outputs[-1], targets[:bucket[1]], weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
else:
losses.append(sequence_loss(
outputs[-1], targets[:bucket[1]], weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
return outputs, losses
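# Illustrative usage sketch (added; not part of the original module). The names
# enc_inp, dec_inp, targets and weights stand for hypothetical lists of input
# tensors whose lengths cover the largest bucket; everything else comes from the
# definitions above, mirroring the lambda shown in the model_with_buckets
# docstring:
#
#   cell = rnn_cell.GRUCell(24)
#   seq2seq_f = lambda x, y: basic_rnn_seq2seq(x, y, cell)
#   buckets = [(5, 10), (10, 15)]
#   outputs, losses = model_with_buckets(enc_inp, dec_inp, targets, weights,
#                                        buckets, seq2seq_f)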
| apache-2.0 |
LennonChin/Django-Practices | MxOnline/extra_apps/xadmin/plugins/auth.py | 1 | 9482 |
# coding=utf-8
from django import forms
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm, PasswordChangeForm)
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from django.forms import ModelMultipleChoiceField
from django.contrib.auth.models import User
from xadmin.layout import Fieldset, Main, Side, Row, FormHelper
from xadmin.sites import site
from xadmin.util import unquote
from xadmin.views import BaseAdminPlugin, ModelFormAdminView, ModelAdminView, CommAdminView, csrf_protect_m
# fix the xadmin bug
from django.contrib.auth import get_user_model
User = get_user_model()
ACTION_NAME = {
'add': _('Can add %s'),
'change': _('Can change %s'),
'edit': _('Can edit %s'),
'delete': _('Can delete %s'),
'view': _('Can view %s'),
}
def get_permission_name(p):
action = p.codename.split('_')[0]
if action in ACTION_NAME:
return ACTION_NAME[action] % str(p.content_type)
else:
return p.name
class PermissionModelMultipleChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, p):
return get_permission_name(p)
class GroupAdmin(object):
search_fields = ('name',)
ordering = ('name',)
style_fields = {'permissions': 'm2m_transfer'}
model_icon = 'fa fa-group'
def get_field_attrs(self, db_field, **kwargs):
attrs = super(GroupAdmin, self).get_field_attrs(db_field, **kwargs)
if db_field.name == 'permissions':
attrs['form_class'] = PermissionModelMultipleChoiceField
return attrs
class UserAdmin(object):
change_user_password_template = None
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
style_fields = {'user_permissions': 'm2m_transfer'}
model_icon = 'fa fa-user'
relfield_style = 'fk-ajax'
def get_field_attrs(self, db_field, **kwargs):
attrs = super(UserAdmin, self).get_field_attrs(db_field, **kwargs)
if db_field.name == 'user_permissions':
attrs['form_class'] = PermissionModelMultipleChoiceField
return attrs
def get_model_form(self, **kwargs):
if self.org_obj is None:
self.form = UserCreationForm
else:
self.form = UserChangeForm
return super(UserAdmin, self).get_model_form(**kwargs)
def get_form_layout(self):
if self.org_obj:
self.form_layout = (
Main(
Fieldset('',
'username', 'password',
css_class='unsort no_title'
),
Fieldset(_('Personal info'),
Row('first_name', 'last_name'),
'email'
),
Fieldset(_('Permissions'),
'groups', 'user_permissions'
),
Fieldset(_('Important dates'),
'last_login', 'date_joined'
),
),
Side(
Fieldset(_('Status'),
'is_active', 'is_staff', 'is_superuser',
),
)
)
return super(UserAdmin, self).get_form_layout()
class PermissionAdmin(object):
def show_name(self, p):
return get_permission_name(p)
show_name.short_description = _('Permission Name')
show_name.is_column = True
model_icon = 'fa fa-lock'
list_display = ('show_name', )
site.register(Group, GroupAdmin)
site.register(User, UserAdmin)
site.register(Permission, PermissionAdmin)
class UserFieldPlugin(BaseAdminPlugin):
user_fields = []
def get_field_attrs(self, __, db_field, **kwargs):
if self.user_fields and db_field.name in self.user_fields:
return {'widget': forms.HiddenInput}
return __()
def get_form_datas(self, datas):
if self.user_fields and 'data' in datas:
if hasattr(datas['data'],'_mutable') and not datas['data']._mutable:
datas['data'] = datas['data'].copy()
for f in self.user_fields:
datas['data'][f] = self.user.id
return datas
site.register_plugin(UserFieldPlugin, ModelFormAdminView)
class ModelPermissionPlugin(BaseAdminPlugin):
user_can_access_owned_objects_only = False
user_owned_objects_field = 'user'
def queryset(self, qs):
if self.user_can_access_owned_objects_only and \
not self.user.is_superuser:
filters = {self.user_owned_objects_field: self.user}
qs = qs.filter(**filters)
return qs
def get_list_display(self, list_display):
if self.user_can_access_owned_objects_only and \
not self.user.is_superuser and \
self.user_owned_objects_field in list_display:
list_display.remove(self.user_owned_objects_field)
return list_display
site.register_plugin(ModelPermissionPlugin, ModelAdminView)
class AccountMenuPlugin(BaseAdminPlugin):
def block_top_account_menu(self, context, nodes):
return '<li><a href="%s"><i class="fa fa-key"></i> %s</a></li>' % (self.get_admin_url('account_password'), _('Change Password'))
site.register_plugin(AccountMenuPlugin, CommAdminView)
class ChangePasswordView(ModelAdminView):
model = User
change_password_form = AdminPasswordChangeForm
change_user_password_template = None
@csrf_protect_m
def get(self, request, object_id):
if not self.has_change_permission(request):
raise PermissionDenied
self.obj = self.get_object(unquote(object_id))
self.form = self.change_password_form(self.obj)
return self.get_response()
def get_media(self):
media = super(ChangePasswordView, self).get_media()
media = media + self.vendor('xadmin.form.css', 'xadmin.page.form.js') + self.form.media
return media
def get_context(self):
context = super(ChangePasswordView, self).get_context()
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
self.form.helper = helper
context.update({
'title': _('Change password: %s') % escape(unicode(self.obj)),
'form': self.form,
'has_delete_permission': False,
'has_change_permission': True,
'has_view_permission': True,
'original': self.obj,
})
return context
def get_response(self):
return TemplateResponse(self.request, [
self.change_user_password_template or
'xadmin/auth/user/change_password.html'
], self.get_context(), current_app=self.admin_site.name)
@method_decorator(sensitive_post_parameters())
@csrf_protect_m
def post(self, request, object_id):
if not self.has_change_permission(request):
raise PermissionDenied
self.obj = self.get_object(unquote(object_id))
self.form = self.change_password_form(self.obj, request.POST)
if self.form.is_valid():
self.form.save()
self.message_user(_('Password changed successfully.'), 'success')
return HttpResponseRedirect(self.model_admin_url('change', self.obj.pk))
else:
return self.get_response()
class ChangeAccountPasswordView(ChangePasswordView):
change_password_form = PasswordChangeForm
@csrf_protect_m
def get(self, request):
self.obj = self.user
self.form = self.change_password_form(self.obj)
return self.get_response()
def get_context(self):
context = super(ChangeAccountPasswordView, self).get_context()
context.update({
'title': _('Change password'),
'account_view': True,
})
return context
@method_decorator(sensitive_post_parameters())
@csrf_protect_m
def post(self, request):
self.obj = self.user
self.form = self.change_password_form(self.obj, request.POST)
if self.form.is_valid():
self.form.save()
self.message_user(_('Password changed successfully.'), 'success')
return HttpResponseRedirect(self.get_admin_url('index'))
else:
return self.get_response()
site.register_view(r'^users/userprofile/(.+)/password/$',
ChangePasswordView, name='user_change_password')
site.register_view(r'^account/password/$', ChangeAccountPasswordView,
name='account_password')
|
apache-2.0
|
clbarnes/bctpy
|
bct/algorithms/modularity.py
|
1
|
58844
|
from __future__ import division, print_function
import numpy as np
from bct.utils import BCTParamError, normalize, get_rng
def ci2ls(ci):
'''
Convert from a community index vector to a 2D python list of modules
The list is a pure python list, not requiring numpy.
Parameters
----------
ci : Nx1 np.ndarray
the community index vector
    Returns
    -------
    ls : listof(list)
        pure python list of modules, with node indices zero-indexed
'''
if not np.size(ci):
return ci # list is empty
_, ci = np.unique(ci, return_inverse=True)
ci += 1
nr_indices = int(max(ci))
ls = []
for c in range(nr_indices):
ls.append([])
for i, x in enumerate(ci):
ls[ci[i] - 1].append(i)
return ls
def ls2ci(ls, zeroindexed=False):
'''
Convert from a 2D python list of modules to a community index vector.
The list is a pure python list, not requiring numpy.
Parameters
----------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of value of zeroindexed parameter)
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ci : Nx1 np.ndarray
community index vector
'''
if ls is None or np.size(ls) == 0:
return () # list is empty
nr_indices = sum(map(len, ls))
ci = np.zeros((nr_indices,), dtype=int)
z = int(not zeroindexed)
for i, x in enumerate(ls):
for j, y in enumerate(ls[i]):
ci[ls[i][j]] = i + z
return ci
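# A minimal round-trip sketch for the two converters above (illustrative
# vector, not taken from any dataset):
#
#     import numpy as np
#     ci = np.array([4, 4, 7, 7, 9, 9])
#     ls = ci2ls(ci)        # -> [[0, 1], [2, 3], [4, 5]]
#     ci2 = ls2ci(ls)       # -> array([1, 1, 2, 2, 3, 3])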
def community_louvain(W, gamma=1, ci=None, B='modularity', seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes which maximizes the number of within-group
edges and minimizes the number of between-group edges.
    This function is a fast and accurate multi-iterative generalization of the
    Louvain community detection algorithm. This function subsumes and improves
    upon modularity_[louvain,finetune]_[und,dir]() and additionally allows
    optimization of other objective functions (includes a built-in Potts-model
    Hamiltonian and allows for custom objective-function matrices).
Parameters
----------
W : NxN np.array
directed/undirected weighted/binary adjacency matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
ignored if an objective function matrix is specified.
ci : Nx1 np.arraylike
initial community affiliation vector. default value=None
B : str | NxN np.arraylike
string describing objective function type, or provides a custom
NxN objective-function matrix. builtin values
'modularity' uses Q-metric as objective function
'potts' uses Potts model Hamiltonian.
'negative_sym' symmetric treatment of negative weights
'negative_asym' asymmetric treatment of negative weights
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.array
final community structure
q : float
optimized q-statistic (modularity only)
'''
rng = get_rng(seed)
n = len(W)
s = np.sum(W)
#if np.min(W) < -1e-10:
# raise BCTParamError('adjmat must not contain negative weights')
if ci is None:
ci = np.arange(n) + 1
else:
if len(ci) != n:
raise BCTParamError('initial ci vector size must equal N')
_, ci = np.unique(ci, return_inverse=True)
ci += 1
Mb = ci.copy()
renormalize = False
if B in ('negative_sym', 'negative_asym'):
renormalize = True
W0 = W * (W > 0)
s0 = np.sum(W0)
B0 = W0 - gamma * np.outer(np.sum(W0, axis=1), np.sum(W0, axis=0)) / s0
W1 = -W * (W < 0)
s1 = np.sum(W1)
if s1:
B1 = W1 - gamma * np.outer(np.sum(W1, axis=1), np.sum(W1, axis=0)) / s1
else:
B1 = 0
elif np.min(W) < -1e-10:
raise BCTParamError("Input connection matrix contains negative "
'weights but objective function dealing with negative weights '
'was not selected')
if B == 'potts' and np.any(np.logical_not(np.logical_or(W == 0, W == 1))):
raise BCTParamError('Potts hamiltonian requires binary input matrix')
if B == 'modularity':
B = W - gamma * np.outer(np.sum(W, axis=1), np.sum(W, axis=0)) / s
elif B == 'potts':
B = W - gamma * np.logical_not(W)
elif B == 'negative_sym':
B = (B0 / (s0 + s1)) - (B1 / (s0 + s1))
elif B == 'negative_asym':
B = (B0 / s0) - (B1 / (s0 + s1))
else:
try:
B = np.array(B)
except:
raise BCTParamError('unknown objective function type')
if B.shape != W.shape:
raise BCTParamError('objective function matrix does not match '
'size of adjacency matrix')
if not np.allclose(B, B.T):
print ('Warning: objective function matrix not symmetric, '
'symmetrizing')
B = (B + B.T) / 2
Hnm = np.zeros((n, n))
for m in range(1, n + 1):
Hnm[:, m - 1] = np.sum(B[:, ci == m], axis=1) # node to module degree
H = np.sum(Hnm, axis=1) # node degree
Hm = np.sum(Hnm, axis=0) # module degree
q0 = -np.inf
# compute modularity
q = np.sum(B[np.tile(ci, (n, 1)) == np.tile(ci, (n, 1)).T]) / s
first_iteration = True
while q - q0 > 1e-10:
it = 0
flag = True
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity infinite loop style G. '
'Please contact the developer.')
flag = False
for u in rng.permutation(n):
ma = Mb[u] - 1
dQ = Hnm[u, :] - Hnm[u, ma] + B[u, u] # algorithm condition
dQ[ma] = 0
max_dq = np.max(dQ)
if max_dq > 1e-10:
flag = True
mb = np.argmax(dQ)
Hnm[:, mb] += B[:, u]
Hnm[:, ma] -= B[:, u] # change node-to-module strengths
Hm[mb] += H[u]
Hm[ma] -= H[u] # change module strengths
Mb[u] = mb + 1
_, Mb = np.unique(Mb, return_inverse=True)
Mb += 1
M0 = ci.copy()
if first_iteration:
ci = Mb.copy()
first_iteration = False
else:
for u in range(1, n + 1):
ci[M0 == u] = Mb[u - 1] # assign new modules
n = np.max(Mb)
b1 = np.zeros((n, n))
for i in range(1, n + 1):
for j in range(i, n + 1):
# pool weights of nodes in same module
bm = np.sum(B[np.ix_(Mb == i, Mb == j)])
b1[i - 1, j - 1] = bm
b1[j - 1, i - 1] = bm
B = b1.copy()
Mb = np.arange(1, n + 1)
Hnm = B.copy()
H = np.sum(B, axis=0)
Hm = H.copy()
q0 = q
q = np.trace(B) # compute modularity
# Workaround to normalize
if not renormalize:
return ci, q/s
else:
return ci, q
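# Minimal usage sketch for community_louvain (illustrative matrix; any small
# non-negative symmetric adjacency matrix works with the default objective):
#
#     import numpy as np
#     W = np.array([[0., 1., 1., 0.],
#                   [1., 0., 1., 0.],
#                   [1., 1., 0., 1.],
#                   [0., 0., 1., 0.]])
#     ci, q = community_louvain(W, gamma=1, B='modularity', seed=0)
#     # ci is the Nx1 community affiliation vector, q the optimized modularity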
def link_communities(W, type_clustering='single'):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes which maximizes the number of within-group
edges and minimizes the number of between-group edges.
This algorithm uncovers overlapping community structure via hierarchical
clustering of network links. This algorithm is generalized for
weighted/directed/fully-connected networks
Parameters
----------
W : NxN np.array
directed weighted/binary adjacency matrix
type_clustering : str
type of hierarchical clustering. 'single' for single-linkage,
'complete' for complete-linkage. Default value='single'
Returns
-------
M : CxN np.ndarray
nodal community affiliation matrix.
'''
n = len(W)
W = normalize(W)
if type_clustering not in ('single', 'complete'):
raise BCTParamError('Unrecognized clustering type')
# set diagonal to mean weights
np.fill_diagonal(W, 0)
W[range(n), range(n)] = (
np.sum(W, axis=0) / np.sum(np.logical_not(W), axis=0) +
np.sum(W.T, axis=0) / np.sum(np.logical_not(W.T), axis=0)) / 2
# out/in norm squared
No = np.sum(W**2, axis=1)
Ni = np.sum(W**2, axis=0)
# weighted in/out jaccard
Jo = np.zeros((n, n))
Ji = np.zeros((n, n))
for b in range(n):
for c in range(n):
Do = np.dot(W[b, :], W[c, :].T)
Jo[b, c] = Do / (No[b] + No[c] - Do)
Di = np.dot(W[:, b].T, W[:, c])
Ji[b, c] = Di / (Ni[b] + Ni[c] - Di)
# get link similarity
A, B = np.where(np.logical_and(np.logical_or(W, W.T),
np.triu(np.ones((n, n)), 1)))
m = len(A)
Ln = np.zeros((m, 2), dtype=np.int32) # link nodes
Lw = np.zeros((m,)) # link weights
for i in range(m):
Ln[i, :] = (A[i], B[i])
Lw[i] = (W[A[i], B[i]] + W[B[i], A[i]]) / 2
ES = np.zeros((m, m), dtype=np.float32) # link similarity
for i in range(m):
for j in range(m):
if Ln[i, 0] == Ln[j, 0]:
a = Ln[i, 0]
b = Ln[i, 1]
c = Ln[j, 1]
elif Ln[i, 0] == Ln[j, 1]:
a = Ln[i, 0]
b = Ln[i, 1]
c = Ln[j, 0]
elif Ln[i, 1] == Ln[j, 0]:
a = Ln[i, 1]
b = Ln[i, 0]
c = Ln[j, 1]
elif Ln[i, 1] == Ln[j, 1]:
a = Ln[i, 1]
b = Ln[i, 0]
c = Ln[j, 0]
else:
continue
ES[i, j] = (W[a, b] * W[a, c] * Ji[b, c] +
W[b, a] * W[c, a] * Jo[b, c]) / 2
np.fill_diagonal(ES, 0)
# perform hierarchical clustering
C = np.zeros((m, m), dtype=np.int32) # community affiliation matrix
Nc = C.copy()
Mc = np.zeros((m, m), dtype=np.float32)
Dc = Mc.copy() # community nodes, links, density
U = np.arange(m) # initial community assignments
C[0, :] = np.arange(m)
import time
for i in range(m - 1):
print('hierarchy %i' % i)
#time1 = time.time()
for j in range(len(U)): # loop over communities
ixes = C[i, :] == U[j] # get link indices
links = np.sort(Lw[ixes])
#nodes = np.sort(Ln[ixes,:].flat)
nodes = np.sort(np.reshape(
Ln[ixes, :], 2 * np.size(np.where(ixes))))
# get unique nodes
nodulo = np.append(nodes[0], (nodes[1:])[nodes[1:] != nodes[:-1]])
#nodulo = ((nodes[1:])[nodes[1:] != nodes[:-1]])
nc = len(nodulo)
#nc = len(nodulo)+1
mc = np.sum(links)
min_mc = np.sum(links[:nc - 1]) # minimal weight
dc = (mc - min_mc) / (nc * (nc - 1) /
2 - min_mc) # community density
            if np.array(dc).shape != ():
print(dc)
print(dc.shape)
Nc[i, j] = nc
Mc[i, j] = mc
Dc[i, j] = dc if not np.isnan(dc) else 0
#time2 = time.time()
#print('compute densities time', time2-time1)
C[i + 1, :] = C[i, :] # copy current partition
#if i in (2693,):
# import pdb
# pdb.set_trace()
# Profiling and debugging show that this line, finding
# the max values in this matrix, take about 3x longer than the
# corresponding matlab version. Can it be improved?
u1, u2 = np.where(ES[np.ix_(U, U)] == np.max(ES[np.ix_(U, U)]))
if np.size(u1) > 2:
# pick one
wehr, = np.where((u1 == u2[0]))
uc = np.squeeze((u1[0], u2[0]))
ud = np.squeeze((u1[wehr], u2[wehr]))
u1 = uc
u2 = ud
#time25 = time.time()
#print('copy and max time', time25-time2)
# get unique links (implementation of matlab sortrows)
#ugl = np.array((u1,u2))
ugl = np.sort((u1, u2), axis=1)
ug_rows = ugl[np.argsort(ugl, axis=0)[:, 0]]
# implementation of matlab unique(A, 'rows')
unq_rows = np.vstack({tuple(row) for row in ug_rows})
V = U[unq_rows]
#time3 = time.time()
#print('sortrows time', time3-time25)
for j in range(len(V)):
if type_clustering == 'single':
x = np.max(ES[V[j, :], :], axis=0)
elif type_clustering == 'complete':
x = np.min(ES[V[j, :], :], axis=0)
# assign distances to whole clusters
# import pdb
# pdb.set_trace()
ES[V[j, :], :] = np.array((x, x))
ES[:, V[j, :]] = np.transpose((x, x))
# clear diagonal
ES[V[j, 0], V[j, 0]] = 0
ES[V[j, 1], V[j, 1]] = 0
# merge communities
C[i + 1, C[i + 1, :] == V[j, 1]] = V[j, 0]
V[V == V[j, 1]] = V[j, 0]
#time4 = time.time()
#print('get linkages time', time4-time3)
U = np.unique(C[i + 1, :])
if len(U) == 1:
break
#time5 = time.time()
#print('get unique communities time', time5-time4)
    # end of hierarchical clustering
#Dc[ np.where(np.isnan(Dc)) ]=0
i = np.argmax(np.sum(Dc * Mc, axis=1))
U = np.unique(C[i, :])
M = np.zeros((len(U), n))
for j in range(len(U)):
M[j, np.unique(Ln[C[i, :] == U[j], :])] = 1
M = M[np.sum(M, axis=1) > 2, :]
return M
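# Minimal usage sketch for link_communities (illustrative matrix; note that
# the routine prints its hierarchy level as it runs):
#
#     import numpy as np
#     W = np.array([[0., 1., 1., 0., 0.],
#                   [1., 0., 1., 0., 0.],
#                   [1., 1., 0., 1., 0.],
#                   [0., 0., 1., 0., 1.],
#                   [0., 0., 0., 1., 0.]])
#     M = link_communities(W)
#     # M[c, i] == 1 when node i belongs to (possibly overlapping) community c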
def _safe_squeeze(arr, *args, **kwargs):
"""
numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array",
which is not necessarily desirable.
This function does the squeeze operation, but ensures that there is at least
1 dimension in the output.
"""
out = np.squeeze(arr, *args, **kwargs)
if np.ndim(out) == 0:
out = out.reshape((1,))
return out
def modularity_dir(A, gamma=1, kci=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
Parameters
----------
W : NxN np.ndarray
directed weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
kci : Nx1 np.ndarray | None
starting community structure. If specified, calculates the Q-metric
        on the given community structure, without doing any optimization.
Otherwise, if not specified, uses a spectral modularity maximization
algorithm.
Returns
-------
ci : Nx1 np.ndarray
optimized community structure
Q : float
maximized modularity metric
Notes
-----
    This algorithm is deterministic. The matlab function bearing this
    name incorrectly states that the outcome depends on heuristics
    involving a random seed. The Louvain method does depend on a random seed,
    but this function uses a deterministic modularity maximization algorithm.
'''
from scipy import linalg
n = len(A) # number of vertices
ki = np.sum(A, axis=0) # in degree
ko = np.sum(A, axis=1) # out degree
m = np.sum(ki) # number of edges
b = A - gamma * np.outer(ko, ki) / m
B = b + b.T # directed modularity matrix
init_mod = np.arange(n) # initial one big module
modules = [] # output modules list
def recur(module):
n = len(module)
modmat = B[module][:, module]
vals, vecs = linalg.eig(modmat) # biggest eigendecomposition
rlvals = np.real(vals)
max_eigvec = _safe_squeeze(vecs[:, np.where(rlvals == np.max(rlvals))])
if max_eigvec.ndim > 1: # if multiple max eigenvalues, pick one
max_eigvec = max_eigvec[:, 0]
# initial module assignments
mod_asgn = _safe_squeeze((max_eigvec >= 0) * 2 - 1)
q = np.dot(mod_asgn, np.dot(modmat, mod_asgn)) # modularity change
if q > 0: # change in modularity was positive
qmax = q
np.fill_diagonal(modmat, 0)
it = np.ma.masked_array(np.ones((n,)), False)
mod_asgn_iter = mod_asgn.copy()
while np.any(it): # do some iterative fine tuning
# this line is linear algebra voodoo
q_iter = qmax - 4 * mod_asgn_iter * \
(np.dot(modmat, mod_asgn_iter))
qmax = np.max(q_iter * it)
imax = np.argmax(q_iter * it)
#imax, = np.where(q_iter == qmax)
#if len(imax) > 0:
# imax = imax[0]
# print(imax)
# does switching increase modularity?
mod_asgn_iter[imax] *= -1
it[imax] = np.ma.masked
if qmax > q:
q = qmax
mod_asgn = mod_asgn_iter
if np.abs(np.sum(mod_asgn)) == n: # iteration yielded null module
modules.append(np.array(module).tolist())
else:
mod1 = module[np.where(mod_asgn == 1)]
mod2 = module[np.where(mod_asgn == -1)]
recur(mod1)
recur(mod2)
else: # change in modularity was negative or 0
modules.append(np.array(module).tolist())
# adjustment to one-based indexing occurs in ls2ci
if kci is None:
recur(init_mod)
ci = ls2ci(modules)
else:
ci = kci
s = np.tile(ci, (n, 1))
q = np.sum(np.logical_not(s - s.T) * B / (2 * m))
return ci, q
def modularity_finetune_dir(W, ci=None, gamma=1, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
This algorithm is inspired by the Kernighan-Lin fine-tuning algorithm
and is designed to refine a previously detected community structure.
Parameters
----------
W : NxN np.ndarray
directed weighted/binary connection matrix
ci : Nx1 np.ndarray | None
initial community affiliation vector
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector
Q : float
optimized modularity metric
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
if ci is None:
ci = np.arange(n) + 1
else:
_, ci = np.unique(ci, return_inverse=True)
ci += 1
s = np.sum(W) # weight of edges
knm_o = np.zeros((n, n)) # node-to-module out degree
knm_i = np.zeros((n, n)) # node-to-module in degree
for m in range(np.max(ci)):
knm_o[:, m] = np.sum(W[:, ci == (m + 1)], axis=1)
knm_i[:, m] = np.sum(W[ci == (m + 1), :], axis=0)
k_o = np.sum(knm_o, axis=1) # node out-degree
k_i = np.sum(knm_i, axis=1) # node in-degree
km_o = np.sum(knm_o, axis=0) # module out-degree
    km_i = np.sum(knm_i, axis=0)  # module in-degree
flag = True
while flag:
flag = False
for u in rng.permutation(n): # loop over nodes in random order
ma = ci[u] - 1 # current module of u
# algorithm condition
dq_o = ((knm_o[u, :] - knm_o[u, ma] + W[u, u]) -
gamma * k_o[u] * (km_i - km_i[ma] + k_i[u]) / s)
dq_i = ((knm_i[u, :] - knm_i[u, ma] + W[u, u]) -
gamma * k_i[u] * (km_o - km_o[ma] + k_o[u]) / s)
dq = (dq_o + dq_i) / 2
dq[ma] = 0
max_dq = np.max(dq) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
mb = np.argmax(dq) # take only one value
# print max_dq,mb
knm_o[:, mb] += W[u, :].T # change node-to-module out-degrees
knm_o[:, ma] -= W[u, :].T
knm_i[:, mb] += W[:, u] # change node-to-module in-degrees
knm_i[:, ma] -= W[:, u]
km_o[mb] += k_o[u] # change module out-degrees
km_o[ma] -= k_o[u]
km_i[mb] += k_i[u] # change module in-degrees
km_i[ma] -= k_i[u]
ci[u] = mb + 1 # reassign module
flag = True
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.max(ci) # new number of modules
w = np.zeros((m, m)) # new weighted matrix
for u in range(m):
for v in range(m):
# pool weights of nodes in same module
w[u, v] = np.sum(W[np.ix_(ci == u + 1, ci == v + 1)])
q = np.trace(w) / s - gamma * np.sum(np.dot(w / s, w / s))
return ci, q
def modularity_finetune_und(W, ci=None, gamma=1, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
This algorithm is inspired by the Kernighan-Lin fine-tuning algorithm
and is designed to refine a previously detected community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
ci : Nx1 np.ndarray | None
initial community affiliation vector
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector
Q : float
optimized modularity metric
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
#import time
n = len(W) # number of nodes
if ci is None:
ci = np.arange(n) + 1
else:
_, ci = np.unique(ci, return_inverse=True)
ci += 1
s = np.sum(W) # total weight of edges
knm = np.zeros((n, n)) # node-to-module degree
for m in range(np.max(ci)):
knm[:, m] = np.sum(W[:, ci == (m + 1)], axis=1)
k = np.sum(knm, axis=1) # node degree
km = np.sum(knm, axis=0) # module degree
flag = True
while flag:
flag = False
for u in rng.permutation(n):
# for u in np.arange(n):
ma = ci[u] - 1
# time.sleep(1)
# algorithm condition
dq = (knm[u, :] - knm[u, ma] + W[u, u]) - \
gamma * k[u] * (km - km[ma] + k[u]) / s
# print
# np.sum(knm[u,:],knm[u,ma],W[u,u],gamma,k[u],np.sum(km),km[ma],k[u],s
dq[ma] = 0
max_dq = np.max(dq) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
mb = np.argmax(dq) # take only one value
# print max_dq, mb
knm[:, mb] += W[:, u] # change node-to-module degrees
knm[:, ma] -= W[:, u]
km[mb] += k[u] # change module degrees
km[ma] -= k[u]
ci[u] = mb + 1
flag = True
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.max(ci)
w = np.zeros((m, m))
for u in range(m):
for v in range(m):
# pool weights of nodes in same module
wm = np.sum(W[np.ix_(ci == u + 1, ci == v + 1)])
w[u, v] = wm
w[v, u] = wm
q = np.trace(w) / s - gamma * np.sum(np.dot(w / s, w / s))
return ci, q
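# Typical refinement sketch (illustrative; W is any undirected connection
# matrix, e.g. the toy matrix from the community_louvain sketch above):
#
#     ci0, _ = modularity_louvain_und(W, gamma=1, seed=0)
#     ci, q = modularity_finetune_und(W, ci=ci0, gamma=1, seed=0)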
def modularity_finetune_und_sign(W, qtype='sta', gamma=1, ci=None, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
This algorithm is inspired by the Kernighan-Lin fine-tuning algorithm
and is designed to refine a previously detected community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights.
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
ci : Nx1 np.ndarray | None
initial community affiliation vector
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector
Q : float
optimized modularity metric
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes/modules
if ci is None:
ci = np.arange(n) + 1
else:
_, ci = np.unique(ci, return_inverse=True)
ci += 1
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # positive sum of weights
s1 = np.sum(W1) # negative sum of weights
Knm0 = np.zeros((n, n)) # positive node-to-module-degree
Knm1 = np.zeros((n, n)) # negative node-to-module degree
for m in range(int(np.max(ci))): # loop over modules
Knm0[:, m] = np.sum(W0[:, ci == m + 1], axis=1)
Knm1[:, m] = np.sum(W1[:, ci == m + 1], axis=1)
Kn0 = np.sum(Knm0, axis=1) # positive node degree
Kn1 = np.sum(Knm1, axis=1) # negative node degree
Km0 = np.sum(Knm0, axis=0) # positive module degree
Km1 = np.sum(Knm1, axis=0) # negative module degree
if qtype == 'smp':
d0 = 1 / s0
d1 = 1 / s1 # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = 1 / (s0 + s1) # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
flag = True # flag for within hierarchy search
h = 0
while flag:
h += 1
if h > 1000:
raise BCTParamError('Modularity infinite loop style D')
flag = False
for u in rng.permutation(n): # loop over nodes in random order
ma = ci[u] - 1 # current module of u
dq0 = ((Knm0[u, :] + W0[u, u] - Knm0[u, ma]) -
gamma * Kn0[u] * (Km0 + Kn0[u] - Km0[ma]) / s0)
dq1 = ((Knm1[u, :] + W1[u, u] - Knm1[u, ma]) -
gamma * Kn1[u] * (Km1 + Kn1[u] - Km1[ma]) / s1)
dq = d0 * dq0 - d1 * dq1 # rescaled changes in modularity
dq[ma] = 0 # no changes for same module
# print dq,ma,u
max_dq = np.max(dq) # maximal increase in modularity
mb = np.argmax(dq) # corresponding module
if max_dq > 1e-10: # if maximal increase is positive
# print h,max_dq,mb,u
flag = True
ci[u] = mb + 1 # reassign module
Knm0[:, mb] += W0[:, u]
Knm0[:, ma] -= W0[:, u]
Knm1[:, mb] += W1[:, u]
Knm1[:, ma] -= W1[:, u]
Km0[mb] += Kn0[u]
Km0[ma] -= Kn0[u]
Km1[mb] += Kn1[u]
Km1[ma] -= Kn1[u]
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.tile(ci, (n, 1))
q0 = (W0 - np.outer(Kn0, Kn0) / s0) * (m == m.T)
q1 = (W1 - np.outer(Kn1, Kn1) / s1) * (m == m.T)
q = d0 * np.sum(q0) - d1 * np.sum(q1)
return ci, q
def modularity_louvain_dir(W, gamma=1, hierarchy=False, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
directed weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
        Enables hierarchical output. Default value=False
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
s = np.sum(W) # total weight of edges
h = 0 # hierarchy index
ci = []
ci.append(np.arange(n) + 1) # hierarchical module assignments
q = []
q.append(-1) # hierarchical modularity index
n0 = n
while True:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style E. Please '
'contact the developer with this error.')
k_o = np.sum(W, axis=1) # node in/out degrees
k_i = np.sum(W, axis=0)
km_o = k_o.copy() # module in/out degrees
km_i = k_i.copy()
knm_o = W.copy() # node-to-module in/out degrees
knm_i = W.copy()
m = np.arange(n) + 1 # initial module assignments
flag = True # flag for within hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity Infinite Loop Style F. Please '
'contact the developer with this error.')
flag = False
# loop over nodes in random order
for u in rng.permutation(n):
ma = m[u] - 1
# algorithm condition
dq_o = ((knm_o[u, :] - knm_o[u, ma] + W[u, u]) -
gamma * k_o[u] * (km_i - km_i[ma] + k_i[u]) / s)
dq_i = ((knm_i[u, :] - knm_i[u, ma] + W[u, u]) -
gamma * k_i[u] * (km_o - km_o[ma] + k_o[u]) / s)
dq = (dq_o + dq_i) / 2
dq[ma] = 0
max_dq = np.max(dq) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
mb = np.argmax(dq) # take only one value
knm_o[:, mb] += W[u, :].T # change node-to-module degrees
knm_o[:, ma] -= W[u, :].T
knm_i[:, mb] += W[:, u]
knm_i[:, ma] -= W[:, u]
km_o[mb] += k_o[u] # change module out-degrees
km_o[ma] -= k_o[u]
km_i[mb] += k_i[u]
km_i[ma] -= k_i[u]
m[u] = mb + 1 # reassign module
flag = True
_, m = np.unique(m, return_inverse=True)
m += 1
h += 1
ci.append(np.zeros((n0,)))
# for i,mi in enumerate(m): #loop through module assignments
for i in range(n):
# ci[h][np.where(ci[h-1]==i)]=mi #assign new modules
ci[h][np.where(ci[h - 1] == i + 1)] = m[i]
n = np.max(m) # new number of modules
W1 = np.zeros((n, n)) # new weighted matrix
for i in range(n):
for j in range(n):
# pool weights of nodes in same module
W1[i, j] = np.sum(W[np.ix_(m == i + 1, m == j + 1)])
q.append(0)
# compute modularity
q[h] = np.trace(W1) / s - gamma * np.sum(np.dot(W1 / s, W1 / s))
if q[h] - q[h - 1] < 1e-10: # if modularity does not increase
break
ci = np.array(ci, dtype=int)
if hierarchy:
ci = ci[1:-1]
q = q[1:-1]
return ci, q
else:
return ci[h - 1], q[h - 1]
def modularity_louvain_und(W, gamma=1, hierarchy=False, seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (as of writing). The algorithm may also be used to detect
hierarchical community structure.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
hierarchy : bool
        Enables hierarchical output. Default value=False
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector. If hierarchical output enabled,
it is an NxH np.ndarray instead with multiple iterations
Q : float
optimized modularity metric. If hierarchical output enabled, becomes
an Hx1 array of floats instead.
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
s = np.sum(W) # weight of edges
h = 0 # hierarchy index
ci = []
ci.append(np.arange(n) + 1) # hierarchical module assignments
q = []
q.append(-1) # hierarchical modularity values
n0 = n
#knm = np.zeros((n,n))
# for j in np.xrange(n0+1):
# knm[:,j] = np.sum(w[;,
while True:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style B. Please '
'contact the developer with this error.')
k = np.sum(W, axis=0) # node degree
Km = k.copy() # module degree
Knm = W.copy() # node-to-module degree
m = np.arange(n) + 1 # initial module assignments
flag = True # flag for within-hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Modularity Infinite Loop Style C. Please '
'contact the developer with this error.')
flag = False
# loop over nodes in random order
for i in rng.permutation(n):
ma = m[i] - 1
# algorithm condition
dQ = ((Knm[i, :] - Knm[i, ma] + W[i, i]) -
gamma * k[i] * (Km - Km[ma] + k[i]) / s)
dQ[ma] = 0
max_dq = np.max(dQ) # find maximal modularity increase
if max_dq > 1e-10: # if maximal increase positive
j = np.argmax(dQ) # take only one value
# print max_dq,j,dQ[j]
Knm[:, j] += W[:, i] # change node-to-module degrees
Knm[:, ma] -= W[:, i]
Km[j] += k[i] # change module degrees
Km[ma] -= k[i]
m[i] = j + 1 # reassign module
flag = True
_, m = np.unique(m, return_inverse=True) # new module assignments
# print m,h
m += 1
h += 1
ci.append(np.zeros((n0,)))
# for i,mi in enumerate(m): #loop through initial module assignments
for i in range(n):
# print i, m[i], n0, h, len(m), n
# ci[h][np.where(ci[h-1]==i+1)]=mi #assign new modules
ci[h][np.where(ci[h - 1] == i + 1)] = m[i]
n = np.max(m) # new number of modules
W1 = np.zeros((n, n)) # new weighted matrix
for i in range(n):
for j in range(i, n):
# pool weights of nodes in same module
wp = np.sum(W[np.ix_(m == i + 1, m == j + 1)])
W1[i, j] = wp
W1[j, i] = wp
W = W1
q.append(0)
# compute modularity
q[h] = np.trace(W) / s - gamma * np.sum(np.dot(W / s, W / s))
if q[h] - q[h - 1] < 1e-10: # if modularity does not increase
break
ci = np.array(ci, dtype=int)
if hierarchy:
ci = ci[1:-1]
q = q[1:-1]
return ci, q
else:
return ci[h - 1], q[h - 1]
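# Hierarchical output sketch (illustrative; W is any undirected connection
# matrix, e.g. the toy matrix from the community_louvain sketch above):
#
#     ci_h, q_h = modularity_louvain_und(W, gamma=1, hierarchy=True, seed=0)
#     # ci_h stacks the partition found at each agglomeration level and q_h
#     # holds the corresponding modularity values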
def modularity_louvain_und_sign(W, gamma=1, qtype='sta', seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
The Louvain algorithm is a fast and accurate community detection
algorithm (at the time of writing).
Use this function as opposed to modularity_louvain_und() only if the
network contains a mix of positive and negative weights. If the network
contains all positive weights, the output will be equivalent to that of
modularity_louvain_und().
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector
Q : float
optimized modularity metric
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W) # number of nodes
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # weight of positive links
s1 = np.sum(W1) # weight of negative links
if qtype == 'smp':
d0 = 1 / s0
        d1 = 1 / s1  # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = d0 # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
h = 1 # hierarchy index
nh = n # number of nodes in hierarchy
ci = [None, np.arange(n) + 1] # hierarchical module assignments
q = [-1, 0] # hierarchical modularity values
while q[h] - q[h - 1] > 1e-10:
if h > 300:
raise BCTParamError('Modularity Infinite Loop Style A. Please '
'contact the developer with this error.')
kn0 = np.sum(W0, axis=0) # positive node degree
kn1 = np.sum(W1, axis=0) # negative node degree
km0 = kn0.copy() # positive module degree
km1 = kn1.copy() # negative module degree
knm0 = W0.copy() # positive node-to-module degree
knm1 = W1.copy() # negative node-to-module degree
m = np.arange(nh) + 1 # initial module assignments
flag = True # flag for within hierarchy search
it = 0
while flag:
it += 1
if it > 1000:
raise BCTParamError('Infinite Loop was detected and stopped. '
'This was probably caused by passing in a directed matrix.')
flag = False
# loop over nodes in random order
for u in rng.permutation(nh):
ma = m[u] - 1
dQ0 = ((knm0[u, :] + W0[u, u] - knm0[u, ma]) -
gamma * kn0[u] * (km0 + kn0[u] - km0[ma]) / s0) # positive dQ
dQ1 = ((knm1[u, :] + W1[u, u] - knm1[u, ma]) -
gamma * kn1[u] * (km1 + kn1[u] - km1[ma]) / s1) # negative dQ
dQ = d0 * dQ0 - d1 * dQ1 # rescaled changes in modularity
dQ[ma] = 0 # no changes for same module
max_dQ = np.max(dQ) # maximal increase in modularity
if max_dQ > 1e-10: # if maximal increase is positive
flag = True
mb = np.argmax(dQ)
# change positive node-to-module degrees
knm0[:, mb] += W0[:, u]
knm0[:, ma] -= W0[:, u]
# change negative node-to-module degrees
knm1[:, mb] += W1[:, u]
knm1[:, ma] -= W1[:, u]
km0[mb] += kn0[u] # change positive module degrees
km0[ma] -= kn0[u]
km1[mb] += kn1[u] # change negative module degrees
km1[ma] -= kn1[u]
m[u] = mb + 1 # reassign module
h += 1
ci.append(np.zeros((n,)))
_, m = np.unique(m, return_inverse=True)
m += 1
for u in range(nh): # loop through initial module assignments
ci[h][np.where(ci[h - 1] == u + 1)] = m[u] # assign new modules
nh = np.max(m) # number of new nodes
wn0 = np.zeros((nh, nh)) # new positive weights matrix
wn1 = np.zeros((nh, nh))
for u in range(nh):
for v in range(u, nh):
wn0[u, v] = np.sum(W0[np.ix_(m == u + 1, m == v + 1)])
wn1[u, v] = np.sum(W1[np.ix_(m == u + 1, m == v + 1)])
wn0[v, u] = wn0[u, v]
wn1[v, u] = wn1[u, v]
W0 = wn0
W1 = wn1
q.append(0)
# compute modularity
q0 = np.trace(W0) - np.sum(np.dot(W0, W0)) / s0
q1 = np.trace(W1) - np.sum(np.dot(W1, W1)) / s1
q[h] = d0 * q0 - d1 * q1
_, ci_ret = np.unique(ci[-1], return_inverse=True)
ci_ret += 1
return ci_ret, q[-1]
def modularity_probtune_und_sign(W, qtype='sta', gamma=1, ci=None, p=.45,
seed=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
High-modularity degeneracy is the presence of many topologically
distinct high-modularity partitions of the network.
This algorithm is inspired by the Kernighan-Lin fine-tuning algorithm
and is designed to probabilistically refine a previously detected
community by incorporating random node moves into a finetuning
algorithm.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
ci : Nx1 np.ndarray | None
initial community affiliation vector
p : float
probability of random node moves. Default value = 0.45
seed : hashable, optional
        If None (default), use the global random state of np.random to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
ci : Nx1 np.ndarray
refined community affiliation vector
Q : float
optimized modularity metric
Notes
-----
Ci and Q may vary from run to run, due to heuristics in the
    algorithm. Consequently, it may be worthwhile to compare multiple runs.
'''
rng = get_rng(seed)
n = len(W)
if ci is None:
ci = np.arange(n) + 1
else:
_, ci = np.unique(ci, return_inverse=True)
ci += 1
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # positive sum of weights
s1 = np.sum(W1) # negative sum of weights
Knm0 = np.zeros((n, n)) # positive node-to-module degree
Knm1 = np.zeros((n, n)) # negative node-to-module degree
for m in range(int(np.max(ci))): # loop over initial modules
Knm0[:, m] = np.sum(W0[:, ci == m + 1], axis=1)
Knm1[:, m] = np.sum(W1[:, ci == m + 1], axis=1)
Kn0 = np.sum(Knm0, axis=1) # positive node degree
Kn1 = np.sum(Knm1, axis=1) # negative node degree
Km0 = np.sum(Knm0, axis=0) # positive module degree
    Km1 = np.sum(Knm1, axis=0)  # negative module degree
if qtype == 'smp':
d0 = 1 / s0
d1 = 1 / s1 # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = 1 / (s0 + s1) # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
for u in rng.permutation(n): # loop over nodes in random order
ma = ci[u] - 1 # current module
r = rng.random_sample() < p
if r:
mb = rng.randint(n) # select new module randomly
else:
dq0 = ((Knm0[u, :] + W0[u, u] - Knm0[u, ma]) -
gamma * Kn0[u] * (Km0 + Kn0[u] - Km0[ma]) / s0)
dq1 = ((Knm1[u, :] + W1[u, u] - Knm1[u, ma]) -
gamma * Kn1[u] * (Km1 + Kn1[u] - Km1[ma]) / s1)
dq = d0 * dq0 - d1 * dq1
dq[ma] = 0
max_dq = np.max(dq)
mb = np.argmax(dq)
if r or max_dq > 1e-10:
ci[u] = mb + 1
Knm0[:, mb] += W0[:, u]
Knm0[:, ma] -= W0[:, u]
Knm1[:, mb] += W1[:, u]
Knm1[:, ma] -= W1[:, u]
Km0[mb] += Kn0[u]
Km0[ma] -= Kn0[u]
Km1[mb] += Kn1[u]
Km1[ma] -= Kn1[u]
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.tile(ci, (n, 1))
q0 = (W0 - np.outer(Kn0, Kn0) / s0) * (m == m.T)
q1 = (W1 - np.outer(Kn1, Kn1) / s1) * (m == m.T)
q = d0 * np.sum(q0) - d1 * np.sum(q1)
return ci, q
def modularity_und(A, gamma=1, kci=None):
'''
The optimal community structure is a subdivision of the network into
nonoverlapping groups of nodes in a way that maximizes the number of
within-group edges, and minimizes the number of between-group edges.
The modularity is a statistic that quantifies the degree to which the
network may be subdivided into such clearly delineated groups.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix
gamma : float
resolution parameter. default value=1. Values 0 <= gamma < 1 detect
larger modules while gamma > 1 detects smaller modules.
kci : Nx1 np.ndarray | None
starting community structure. If specified, calculates the Q-metric
        on the given community structure, without doing any optimization.
Otherwise, if not specified, uses a spectral modularity maximization
algorithm.
Returns
-------
ci : Nx1 np.ndarray
optimized community structure
Q : float
maximized modularity metric
Notes
-----
    This algorithm is deterministic. The matlab function bearing this
    name incorrectly states that the outcome depends on heuristics
    involving a random seed. The Louvain method does depend on a random seed,
    but this function uses a deterministic modularity maximization algorithm.
'''
from scipy import linalg
n = len(A) # number of vertices
k = np.sum(A, axis=0) # degree
m = np.sum(k) # number of edges (each undirected edge
# is counted twice)
B = A - gamma * np.outer(k, k) / m # initial modularity matrix
init_mod = np.arange(n) # initial one big module
modules = [] # output modules list
def recur(module):
n = len(module)
modmat = B[module][:, module]
modmat -= np.diag(np.sum(modmat, axis=0))
vals, vecs = linalg.eigh(modmat) # biggest eigendecomposition
rlvals = np.real(vals)
max_eigvec = _safe_squeeze(vecs[:, np.where(rlvals == np.max(rlvals))])
if max_eigvec.ndim > 1: # if multiple max eigenvalues, pick one
max_eigvec = max_eigvec[:, 0]
# initial module assignments
mod_asgn = _safe_squeeze((max_eigvec >= 0) * 2 - 1)
q = np.dot(mod_asgn, np.dot(modmat, mod_asgn)) # modularity change
if q > 0: # change in modularity was positive
qmax = q
np.fill_diagonal(modmat, 0)
it = np.ma.masked_array(np.ones((n,)), False)
mod_asgn_iter = mod_asgn.copy()
while np.any(it): # do some iterative fine tuning
# this line is linear algebra voodoo
q_iter = qmax - 4 * mod_asgn_iter * \
(np.dot(modmat, mod_asgn_iter))
qmax = np.max(q_iter * it)
imax = np.argmax(q_iter * it)
#imax, = np.where(q_iter == qmax)
#if len(imax) > 1:
# imax = imax[0]
# does switching increase modularity?
mod_asgn_iter[imax] *= -1
it[imax] = np.ma.masked
if qmax > q:
q = qmax
mod_asgn = mod_asgn_iter
if np.abs(np.sum(mod_asgn)) == n: # iteration yielded null module
modules.append(np.array(module).tolist())
return
else:
mod1 = module[np.where(mod_asgn == 1)]
mod2 = module[np.where(mod_asgn == -1)]
recur(mod1)
recur(mod2)
else: # change in modularity was negative or 0
modules.append(np.array(module).tolist())
# adjustment to one-based indexing occurs in ls2ci
if kci is None:
recur(init_mod)
ci = ls2ci(modules)
else:
ci = kci
s = np.tile(ci, (n, 1))
q = np.sum(np.logical_not(s - s.T) * B / m)
return ci, q
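# Minimal usage sketch for modularity_und (illustrative matrix; the spectral
# maximization is deterministic, so no seed is involved):
#
#     import numpy as np
#     A = np.array([[0., 1., 1., 0.],
#                   [1., 0., 1., 0.],
#                   [1., 1., 0., 1.],
#                   [0., 0., 1., 0.]])
#     ci, q = modularity_und(A, gamma=1)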
def modularity_und_sign(W, ci, qtype='sta'):
'''
This function simply calculates the signed modularity for a given
partition. It does not do automatic partition generation right now.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
ci : Nx1 np.ndarray
community partition
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
Returns
-------
ci : Nx1 np.ndarray
the partition which was input (for consistency of the API)
Q : float
maximized modularity metric
Notes
-----
uses a deterministic algorithm
'''
n = len(W)
_, ci = np.unique(ci, return_inverse=True)
ci += 1
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # positive sum of weights
s1 = np.sum(W1) # negative sum of weights
Knm0 = np.zeros((n, n)) # positive node-to-module degree
Knm1 = np.zeros((n, n)) # negative node-to-module degree
for m in range(int(np.max(ci))): # loop over initial modules
Knm0[:, m] = np.sum(W0[:, ci == m + 1], axis=1)
Knm1[:, m] = np.sum(W1[:, ci == m + 1], axis=1)
Kn0 = np.sum(Knm0, axis=1) # positive node degree
Kn1 = np.sum(Knm1, axis=1) # negative node degree
Km0 = np.sum(Knm0, axis=0) # positive module degree
    Km1 = np.sum(Knm1, axis=0)  # negative module degree
if qtype == 'smp':
d0 = 1 / s0
d1 = 1 / s1 # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = 1 / (s0 + s1) # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
m = np.tile(ci, (n, 1))
q0 = (W0 - np.outer(Kn0, Kn0) / s0) * (m == m.T)
q1 = (W1 - np.outer(Kn1, Kn1) / s1) * (m == m.T)
q = d0 * np.sum(q0) - d1 * np.sum(q1)
return ci, q
def partition_distance(cx, cy):
'''
This function quantifies the distance between pairs of community
partitions with information theoretic measures.
Parameters
----------
cx : Nx1 np.ndarray
community affiliation vector X
cy : Nx1 np.ndarray
community affiliation vector Y
Returns
-------
    VIn : float
        normalized variation of information
    MIn : float
        normalized mutual information
Notes
-----
(Definitions:
VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n)
MIn = 2MI(X,Y)/[H(X)+H(Y)]
where H is entropy, MI is mutual information and n is number of nodes)
'''
n = np.size(cx)
_, cx = np.unique(cx, return_inverse=True)
_, cy = np.unique(cy, return_inverse=True)
_, cxy = np.unique(cx + cy * 1j, return_inverse=True)
cx += 1
cy += 1
cxy += 1
Px = np.histogram(cx, bins=np.max(cx))[0] / n
Py = np.histogram(cy, bins=np.max(cy))[0] / n
Pxy = np.histogram(cxy, bins=np.max(cxy))[0] / n
Hx = -np.sum(Px * np.log(Px))
Hy = -np.sum(Py * np.log(Py))
Hxy = -np.sum(Pxy * np.log(Pxy))
Vin = (2 * Hxy - Hx - Hy) / np.log(n)
Min = 2 * (Hx + Hy - Hxy) / (Hx + Hy)
return Vin, Min
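# Minimal usage sketch for partition_distance (illustrative partitions):
#
#     import numpy as np
#     cx = np.array([1, 1, 1, 2, 2, 2])
#     cy = np.array([1, 1, 2, 2, 3, 3])
#     vi_n, mi_n = partition_distance(cx, cy)
#     # vi_n (normalized variation of information) is 0 only for identical
#     # partitions; mi_n (normalized mutual information) is 1 in that case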
|
gpl-3.0
|
jburns12/stixproject.github.io
|
documentation/idioms/snort-test-mechanism/snort-test-mechanism-consumer.py
|
1
|
1373
|
#!/usr/bin/env python
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
'''
The following code requires python-stix v1.1.1.0 or greater installed.
For installation instructions, please refer to https://github.com/STIXProject/python-stix.
'''
import sys
from stix.core import STIXPackage
def parse_stix(stix_package):
for indicator in stix_package.indicators:
print("== INDICATOR ==")
print("Title: " + indicator.title)
print("Confidence: " + indicator.confidence.value.value)
for indicated_ttp in indicator.indicated_ttps:
# Look up each TTP label
ttp = stix_package.find(indicated_ttp.item.idref)
for target in ttp.exploit_targets:
et = stix_package.find(target.item.idref)
for vuln in et.vulnerabilities:
print("Indicated TTP: " + ttp.title + ":" + vuln.cve_id)
for tm in indicator.test_mechanisms:
print("Producer: " + tm.producer.identity.name)
print("Efficacy: " + tm.efficacy.value.value)
for rule in tm.rules:
print("Rule: " + rule.value)
if __name__ == '__main__':
try:
fname = sys.argv[1]
    except IndexError:
exit(1)
    with open(fname) as fd:
        stix_pkg = STIXPackage.from_xml(fd)
    parse_stix(stix_pkg)
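# Example invocation (hypothetical filename for a locally saved STIX 1.x
# XML package):
#   python snort-test-mechanism-consumer.py snort-test-mechanism.xml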
|
bsd-3-clause
|
TurboTurtle/sos
|
sos/report/plugins/openshift.py
|
5
|
15114
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
from fnmatch import translate
import os
import re
class Openshift(Plugin, RedHatPlugin):
"""This is the plugin for OCP 4.x collections. While this product is still
    built on top of kubernetes, there is enough difference in the collection
requirements and approach to warrant a separate plugin as opposed to
further extending the kubernetes plugin (or the OCP 3.x extensions included
in the Red Hat version of the kube plugin).
By default, this plugin will collect cluster information and inspect the
default namespaces/projects that are created during deployment - i.e. the
namespaces of the cluster projects matching openshift.* and kube.*. At the
time of this plugin's creation that number of default projects is already
north of 50; hence this plugin is expected to take a long time in both the
    setup() and collect() phases. End-user projects may also be collected
    when those projects are included in the `add-namespaces` or
`only-namespaces` options.
    This plugin is expected to need to perform an `oc login` command in order
    to correctly capture information, as system root is not considered
    cluster root on the cluster nodes when accessing the API.
Users will need to either:
1) Provide the bearer token via the `-k openshift.token` option
2) Provide the bearer token via the `SOSOCPTOKEN` environment variable
3) Otherwise ensure that the root user can successfully run `oc` and
get proper output prior to running this plugin
It is highly suggested that option #2 be used first, as this will prevent
the token from being recorded in output saved to the archive. Option #1 may
be used if this is considered an acceptable risk. It is not recommended to
rely on option #3, though it will provide the functionality needed.
"""
short_desc = 'Openshift Container Platform 4.x'
plugin_name = "openshift"
plugin_timeout = 900
profiles = ('openshift',)
packages = ('openshift-hyperkube',)
option_list = [
('token', 'admin token to allow API queries', 'fast', None),
('host', 'host address to use for oc login, including port', 'fast',
'https://localhost:6443'),
('no-oc', 'do not collect `oc` command output', 'fast', False),
('podlogs', 'collect logs from each pod', 'fast', True),
('podlogs-filter', ('limit podlogs collection to pods matching this '
'regex'), 'fast', ''),
('only-namespaces', 'colon-delimited list of namespaces to collect',
'fast', ''),
('add-namespaces', ('colon-delimited list of namespaces to add to the '
'default collections'), 'fast', '')
]
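    # Illustrative command lines exercising the options above (token and
    # namespace values are placeholders):
    #   sos report -o openshift -k openshift.token=<bearer-token>
    #   sos report -o openshift -k openshift.add-namespaces='myproject.*'
    #   SOSOCPTOKEN=<bearer-token> sos report -o openshift -k openshift.podlogs-filter=etcd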
def _check_oc_function(self):
"""Check to see if we can run `oc` commands"""
return self.exec_cmd('oc whoami')['status'] == 0
def _check_oc_logged_in(self):
"""See if we're logged in to the API service, and if not attempt to do
so using provided plugin options
"""
if self._check_oc_function():
return True
# Not logged in currently, attempt to do so
token = self.get_option('token') or os.getenv('SOSOCPTOKEN', None)
if token:
oc_res = self.exec_cmd("oc login %s --token=%s "
"--insecure-skip-tls-verify=True"
% (self.get_option('host'), token))
if oc_res['status'] == 0:
if self._check_oc_function():
return True
self._log_warn("Attempt to login to OCP API failed, will not run "
"or collect `oc` commands")
return False
self._log_warn("Not logged in to OCP API, and no login token provided."
" Will not collect `oc` commands")
return False
def _setup_namespace_regexes(self):
"""Combine a set of regexes for collection with any namespaces passed
to sos via the -k openshift.add-namespaces option. Note that this does
allow for end users to specify namespace regexes of their own.
"""
if self.get_option('only-namespaces'):
return [n for n in self.get_option('only-namespaces').split(':')]
collect_regexes = [
'openshift.*',
'kube.*'
]
if self.get_option('add-namespaces'):
for nsp in self.get_option('add-namespaces').split(':'):
collect_regexes.append(nsp)
return collect_regexes
def _reduce_namespace_list(self, nsps):
"""Reduce the namespace listing returned to just the ones we want to
collect from. By default, as requested by OCP support personnel, this
must include all 'openshift' prefixed namespaces
:param nsps list: Namespace names from oc output
"""
def _match_namespace(namespace):
"""Match a particular namespace for inclusion (or not) in the
collection phases
:param namespace str: The name of a namespace
"""
for regex in self.collect_regexes:
if re.match(regex, namespace):
return True
return False
self.collect_regexes = self._setup_namespace_regexes()
return list(set([n for n in nsps if _match_namespace(n)]))
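    # Illustrative reduction sketch (hypothetical namespace names): with the
    # default regexes plus `-k openshift.add-namespaces=myapp.*`, a listing of
    #   ['openshift-etcd', 'kube-system', 'myapp-prod', 'default']
    # reduces to
    #   ['openshift-etcd', 'kube-system', 'myapp-prod']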
def setup(self):
"""The setup() phase of this plugin will iterate through all default
projects (namespaces), and/or those specified via the `add-namespaces`
and `only-namespaces` plugin options. Both of these options accept
shell-style regexes.
Cluster-wide information, that is information that is not tied to a
specific namespace, will be saved in the top-level plugin directory.
        Each namespace will have its own subdir within the `namespaces` subdir
        to aid in organization. From there, each namespace subdir will have a
subsequent subdir for each type of API resource the plugin collects.
In contrast with the `kubernetes` plugin, this plugin will collect
logs from all pods within each namespace, as well as the previous pod's
logs, by default. The `-k openshift.podlogs-filter` option can be used
to greatly reduce the amount of collected information.
"""
# Capture the kubelet journal, but don't use it as a service which
# would simultaneously enable this and the kubernetes plugin
self.add_journal('kubelet')
self.add_service_status('kubelet')
self.add_forbidden_path([
'/etc/kubernetes/*.crt',
'/etc/kubernetes/*.key',
])
self.add_copy_spec('/etc/kubernetes/*')
# see if we run `oc` commands
if not self.get_option('no-oc'):
can_run_oc = self._check_oc_logged_in()
else:
can_run_oc = False
if can_run_oc:
# with an out-of-the-box install, setup time alone has been known
# to take over 5 minutes. Print a notification message so that
# users don't prematurely think sos has hung during setup
self._log_warn(
'Note that the Openshift Container Platform plugin can be '
'expected in most configurations to take 5+ minutes in both '
'the setup and collection phases'
)
self.oc_cmd = "oc get "
oc_nsps = []
# get 'global' or cluster-level information
self.add_cmd_output([
'oc cluster-info',
'oc get -A pv',
'oc get -A csr',
'oc status',
'oc version'
])
# get non-namespaced API resources
self.collect_cluster_resources()
# get all namespaces, as data collection will be organized by that
_nm_res = self.collect_cmd_output("%s namespaces" % self.oc_cmd)
if _nm_res['status'] == 0:
nsps = [
n.split()[0] for n in _nm_res['output'].splitlines()[1:]
]
oc_nsps = self._reduce_namespace_list(nsps)
# collect each namespace individually
for namespace in oc_nsps:
self.collect_from_namespace(namespace)
def collect_cluster_resources(self):
"""Collect cluster-level (non-namespaced) resources from the API
"""
global_resources = [
'clusternetworks',
'clusteroperators',
'clusterversions',
'componentstatuses',
'configs',
'containerruntimeconfigs',
'controllerconfigs',
'dnses',
'hostsubnets',
'infrastructures',
'machineconfigpools',
'machineconfigs',
'netnamespaces',
'networks',
'nodes',
'proxies',
'storageclasses'
]
for resource in global_resources:
_subdir = "cluster_resources/%s" % resource
_tag = ["ocp_%s" % resource]
_res = self.collect_cmd_output("%s %s" % (self.oc_cmd, resource),
subdir=_subdir, tags=_tag)
if _res['status'] == 0:
for _res_name in _res['output'].splitlines()[1:]:
self.add_cmd_output(
"oc describe %s %s" % (resource, _res_name.split()[0]),
subdir=_subdir
)
def collect_from_namespace(self, namespace):
"""Run through the collection routines for an individual namespace.
This collection should include all requested resources that exist
within that namespace
:param namespace str: The name of the namespace
"""
# define the list of resources to collect
resources = [
'buildconfigs',
'builds',
'catalogsourceconfigs',
'catalogsources',
'clusterserviceversions',
'configmaps',
'daemonsets',
'deploymentconfigs',
'deployments',
'events',
'horizontalpodautoscalers',
'imagestreams',
'ingresscontrollers',
'ingresses',
'installplans',
'limitranges',
'machines',
'machinesets',
'mcoconfigs',
'net-attach-def',
'operatorgroups',
'operatorsources',
'pods',
'pvc',
'resourcequotas',
'routes',
'secrets',
'services',
'statefulsets',
'subscriptions'
]
# save to namespace-specific subdirs to keep the plugin dir organized
subdir = "namespaces/%s" % namespace
# namespace-specific non-resource collections
self.add_cmd_output("oc describe namespace %s" % namespace,
subdir=subdir)
for res in resources:
_subdir = "%s/%s" % (subdir, res)
_tags = [
"ocp_%s" % res,
"ocp_%s_%s" % (namespace, res),
namespace
]
_get_cmd = "%s --namespace=%s %s" % (self.oc_cmd, namespace, res)
# get the 'normal' output first
_res_out = self.collect_cmd_output(
_get_cmd,
subdir=_subdir,
tags=_tags
)
# then get specific detail on each instance of the resource
if _res_out['status'] == 0:
_instances = _res_out['output'].splitlines()[1:]
for _instance in _instances:
_instance_name = _instance.split()[0]
self.add_cmd_output(
"%s %s -o yaml" % (_get_cmd, _instance_name),
subdir=_subdir,
suggest_filename="%s.yaml" % _instance_name
)
# check for podlogs here as a slight optimization to re-running
# 'oc get pods' on all namespaces
if res == 'pods' and _instances and self.get_option('podlogs'):
pod_list = [p.split()[0] for p in _instances]
self.collect_podlogs(namespace, pod_list)
def collect_podlogs(self, namespace, pod_list):
"""For any namespace that has active pods in it, collect the current
and previous pod's logs
:param pod_list list: A list of pod names
"""
_log_dir = "namespaces/%s/pods/podlogs" % namespace
if self.get_option('podlogs-filter'):
# this allows shell-style regex which is more commonly known by
# sysadmins than python-style regex
regex = translate(self.get_option('podlogs-filter'))
else:
regex = None
for pod in pod_list:
if regex and not re.match(regex, pod):
continue
_log_cmd = "oc logs --namespace=%s %s" % (namespace, pod)
self.add_cmd_output([
_log_cmd,
_log_cmd + " -p"
], subdir=_log_dir)
def postproc(self):
# clear any certificate output
self.do_cmd_private_sub('oc ')
self.do_file_private_sub('/etc/kubernetes/*')
# clear the certificate data from /etc/kubernetes that does not have
# the certificate banners that the _private_sub() methods look for
_fields = [
'.*.crt',
'client-certificate-data',
'client-key-data',
'certificate-authority-data',
'.*.key',
'token',
'.*token.*.value' # don't blind match `.*token.*` and lose names
]
regex = r'(\s*(%s):)(.*)' % '|'.join(_fields)
self.do_path_regex_sub('/etc/kubernetes/*', regex, r'\1 *******')
# scrub secret content
self.do_cmd_output_sub('secrets', regex, r'\1 *******')
# `oc describe` output can include url-encoded file content. For the
# most part this is not important as the majority of these instances
# are the contents of bash scripts. However, a select few can contain
# actual data, so just scrub everything that matches the describe
# format for this content
regex = r'(?P<var>(.*\\n)?Source:\s(.*),)((.*?))\n'
self.do_cmd_output_sub('oc describe', regex, r'\g<var> *******\n')
# vim: set et ts=4 sw=4 :
|
gpl-2.0
|
krieger-od/nwjs_chromium.src
|
chrome/test/ispy/server/update_mask_handler.py
|
100
|
2252
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Request Handler to allow test mask updates."""
import json
import webapp2
import re
import sys
import os
from common import constants
from common import image_tools
from common import ispy_utils
import gs_bucket
class UpdateMaskHandler(webapp2.RequestHandler):
"""Request handler to allow test mask updates."""
def post(self):
"""Accepts post requests.
This method will accept a post request containing device, site and
device_id parameters. This method takes the diff of the run
indicated by its parameters and adds it to the mask of the run's
test. It will then delete the run it is applied to and redirect
to the device list view.
"""
test_run = self.request.get('test_run')
expectation = self.request.get('expectation')
# Short-circuit if a parameter is missing.
if not (test_run and expectation):
self.response.headers['Content-Type'] = 'json/application'
self.response.write(json.dumps(
{'error': '\'test_run\' and \'expectation\' must be '
'supplied to update a mask.'}))
return
# Otherwise, set up the utilities.
self.bucket = gs_bucket.GoogleCloudStorageBucket(constants.BUCKET)
self.ispy = ispy_utils.ISpyUtils(self.bucket)
# Short-circuit if the failure does not exist.
if not self.ispy.FailureExists(test_run, expectation):
self.response.headers['Content-Type'] = 'json/application'
self.response.write(json.dumps(
{'error': 'Could not update mask because failure does not exist.'}))
return
# Get the failure namedtuple (which also computes the diff).
failure = self.ispy.GetFailure(test_run, expectation)
# Upload the new mask in place of the original.
self.ispy.UpdateImage(
ispy_utils.GetExpectationPath(expectation, 'mask.png'),
image_tools.ConvertDiffToMask(failure.diff))
# Now that there is no diff for the two images, remove the failure.
self.ispy.RemoveFailure(test_run, expectation)
# Redirect back to the sites list for the test run.
self.redirect('/?test_run=%s' % test_run)
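# Illustrative request (assumption; the URL this handler is mounted on is
# configured elsewhere in the app and is not shown here):
#   POST <handler-url> with form fields test_run=<run> and expectation=<name>
# On success the mask image is replaced, the failure is removed and the
# client is redirected to '/?test_run=<run>'.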
|
bsd-3-clause
|
pawelkondraciuk/mopidy-chomikuj
|
mopidy_chomikuj/__init__.py
|
1
|
1699
|
from __future__ import unicode_literals
import logging
import os
# TODO: Remove entirely if you don't register GStreamer elements below
import pygst
pygst.require('0.10')
import gst
import gobject
from mopidy import config, ext
__version__ = '0.1.0'
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-Chomikuj'
ext_name = 'chomikuj'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
# TODO: Comment in and edit, or remove entirely
#schema['username'] = config.String()
#schema['password'] = config.Secret()
return schema
def setup(self, registry):
# You will typically only implement one of the following things
# in a single extension.
# TODO: Edit or remove entirely
from .frontend import FoobarFrontend
registry.add('frontend', FoobarFrontend)
# TODO: Edit or remove entirely
from .backend import FoobarBackend
registry.add('backend', FoobarBackend)
# TODO: Edit or remove entirely
from .mixer import FoobarMixer
gobject.type_register(FoobarMixer)
gst.element_register(FoobarMixer, 'foobarmixer', gst.RANK_MARGINAL)
# TODO: Edit or remove entirely
registry.add('http:static', {
'name': self.ext_name,
'path': os.path.join(os.path.dirname(__file__), 'static'),
})
|
apache-2.0
|
liangwang/m5
|
ext/ply/test/lex_hedit.py
|
174
|
1141
|
# -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
# nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'H_EDIT_DESCRIPTOR',
)
# Tokens
t_ignore = " \t\n"
def t_H_EDIT_DESCRIPTOR(t):
r"\d+H.*" # This grabs all of the remaining text
i = t.value.index('H')
n = eval(t.value[:i])
# Adjust the tokenizing position
t.lexer.lexpos -= len(t.value) - (i+1+n)
t.value = t.value[i+1:i+1+n]
return t
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
|
bsd-3-clause
|
kaulkie/keyczar
|
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/packaging/zip.py
|
19
|
1767
|
"""SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/zip.py 4043 2009/02/23 09:06:45 scons"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
nooperpudd/pulsar
|
pulsar/apps/wsgi/utils.py
|
5
|
10984
|
'''
The :mod:`pulsar.apps.wsgi.utils` module includes several utilities used
by various components in the :ref:`wsgi application <apps-wsgi>`
'''
import time
import re
import textwrap
import logging
from datetime import datetime, timedelta
from email.utils import formatdate
from urllib.parse import parse_qsl
from pulsar import format_traceback
from pulsar.utils.system import json
from pulsar.utils.structures import MultiValueDict
from pulsar.utils.html import escape
from pulsar.utils.pep import to_string
from pulsar.utils.httpurl import (has_empty_content, REDIRECT_CODES,
HTTPError, parse_dict_header,
JSON_CONTENT_TYPES)
from .structures import Accept, RequestCacheControl
from .content import Html, HtmlDocument
__all__ = ['handle_wsgi_error',
'render_error_debug',
'wsgi_request',
'set_wsgi_request_class',
'dump_environ',
'HOP_HEADERS']
DEFAULT_RESPONSE_CONTENT_TYPES = ('text/html', 'text/plain'
) + JSON_CONTENT_TYPES
HOP_HEADERS = frozenset(('connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade')
)
LOGGER = logging.getLogger('pulsar.wsgi')
error_css = '''
.pulsar-error {
width: 500px;
margin: 50px auto;
}
'''
_RequestClass = None
def wsgi_request(environ, app_handler=None, urlargs=None):
global _RequestClass
return _RequestClass(environ, app_handler=app_handler, urlargs=urlargs)
def set_wsgi_request_class(RequestClass):
global _RequestClass
_RequestClass = RequestClass
def log_wsgi_info(log, environ, status, exc=None):
if not environ.get('pulsar.logged'):
environ['pulsar.logged'] = True
msg = '' if not exc else ' - %s' % exc
log('%s %s %s - %s%s',
environ.get('REQUEST_METHOD'),
environ.get('RAW_URI'),
environ.get('SERVER_PROTOCOL'),
status, msg)
def cookie_date(epoch_seconds=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
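# Illustrative example (not part of the original module):
#   cookie_date(0) -> 'Thu, 01-Jan-1970 00:00:00 GMT'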
def set_cookie(cookies, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
'''Set a cookie key into the cookies dictionary *cookies*.'''
cookies[key] = value
if expires is not None:
if isinstance(expires, datetime):
now = (expires.now(expires.tzinfo) if expires.tzinfo else
expires.utcnow())
delta = expires - now
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
cookies[key]['expires'] = expires
if max_age is not None:
cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
cookies[key]['expires'] = cookie_date(time.time() + max_age)
if path is not None:
cookies[key]['path'] = path
if domain is not None:
cookies[key]['domain'] = domain
if secure:
cookies[key]['secure'] = True
if httponly:
cookies[key]['httponly'] = True
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of
``(value, quality)`` tuples sorted by the quality with some additional
accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
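# Illustrative usage (not part of the original module):
#   parse_accept_header('text/html;q=0.9, application/json')
# builds the Accept object from [('text/html', 0.9), ('application/json', 1)];
# how the pairs are ordered and exposed is up to the Accept class
# in .structures.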
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`pulsar.apps.wsgi.structures.RequestCacheControl` is
used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def _gen_query(query_string, encoding):
# keep_blank_values=True
for key, value in parse_qsl((query_string or ''), True):
yield (to_string(key, encoding, errors='replace'),
to_string(value, encoding, errors='replace'))
def query_dict(query_string, encoding='utf-8'):
if query_string:
return dict(MultiValueDict(_gen_query(query_string, encoding)).items())
else:
return {}
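# Illustrative example (not part of the original module):
#   query_dict('a=1&b=2') -> {'a': '1', 'b': '2'}
#   query_dict('')        -> {}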
error_messages = {
500: 'An exception has occurred while evaluating your request.',
404: 'Cannot find what you are looking for.'
}
class dump_environ(object):
__slots__ = ('environ',)
def __init__(self, environ):
self.environ = environ
def __str__(self):
def _():
for k, v in self.environ.items():
try:
v = str(v)
except Exception as e:
v = str(e)
yield '%s=%s' % (k, v)
return '\n%s\n' % '\n'.join(_())
def handle_wsgi_error(environ, exc):
'''The default error handler while serving a WSGI request.
:param environ: The WSGI environment.
:param exc: the exception
:return: a :class:`.WsgiResponse`
'''
if isinstance(exc, tuple):
exc_info = exc
exc = exc[1]
else:
exc_info = True
request = wsgi_request(environ)
request.cache.handle_wsgi_error = True
response = request.response
if isinstance(exc, HTTPError):
response.status_code = exc.code or 500
else:
response.status_code = getattr(exc, 'status', 500)
response.headers.update(getattr(exc, 'headers', None) or ())
path = '@ %s "%s"' % (request.method, request.path)
status = response.status_code
if status >= 500:
LOGGER.critical('Unhandled exception during HTTP response %s.\n%s',
path, dump_environ(environ), exc_info=exc_info)
else:
log_wsgi_info(LOGGER.warning, environ, response.status, exc)
if has_empty_content(status, request.method) or status in REDIRECT_CODES:
response.content_type = None
response.content = None
else:
request.cache.pop('html_document', None)
renderer = environ.get('error.handler') or render_error
try:
content = renderer(request, exc)
except Exception:
LOGGER.critical('Error while rendering error', exc_info=True)
response.content_type = 'text/plain'
content = 'Critical server error'
if content is not response:
response.content = content
return response
def render_error(request, exc):
'''Default renderer for errors.'''
cfg = request.get('pulsar.cfg')
debug = cfg.debug if cfg else False
response = request.response
if not response.content_type:
content_type = request.get('default.content_type')
response.content_type = request.content_types.best_match(
content_type or DEFAULT_RESPONSE_CONTENT_TYPES)
content_type = None
if response.content_type:
content_type = response.content_type.split(';')[0]
is_html = content_type == 'text/html'
if debug:
msg = render_error_debug(request, exc, is_html)
else:
msg = error_messages.get(response.status_code) or str(exc)
if is_html:
msg = textwrap.dedent("""
<h1>{0[reason]}</h1>
{0[msg]}
<h3>{0[version]}</h3>
""").format({"reason": response.status, "msg": msg,
"version": request.environ['SERVER_SOFTWARE']})
#
if content_type == 'text/html':
doc = HtmlDocument(title=response.status)
doc.head.embedded_css.append(error_css)
doc.body.append(Html('div', msg, cn='pulsar-error'))
return doc.render(request)
elif content_type in JSON_CONTENT_TYPES:
return json.dumps({'status': response.status_code,
'message': msg})
else:
return '\n'.join(msg) if isinstance(msg, (list, tuple)) else msg
def render_error_debug(request, exception, is_html):
'''Render the ``exception`` traceback
'''
error = Html('div', cn='well well-lg') if is_html else []
for trace in format_traceback(exception):
counter = 0
for line in trace.split('\n'):
if line.startswith(' '):
counter += 1
line = line[2:]
if line:
if is_html:
line = Html('p', escape(line), cn='text-danger')
if counter:
line.css({'margin-left': '%spx' % (20*counter)})
error.append(line)
if is_html:
error = Html('div', Html('h1', request.response.status), error)
return error
|
bsd-3-clause
|
JioCloud/nova_test_latest
|
nova/api/openstack/compute/contrib/server_group_quotas.py
|
79
|
1848
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import quota
QUOTAS = quota.QUOTAS
class ExtendedLimitsController(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj):
context = req.environ['nova.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs = resp_obj.obj.get('limits', {}).get('absolute', {})
abs['maxServerGroups'] = quotas.get('server_groups').get('limit')
abs['maxServerGroupMembers'] =\
quotas.get('server_group_members').get('limit')
class Server_group_quotas(extensions.ExtensionDescriptor):
"""Adds quota support to server groups."""
name = "ServerGroupQuotas"
alias = "os-server-group-quotas"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-group-quotas/api/v2")
updated = "2014-07-25T00:00:00Z"
def get_controller_extensions(self):
extension_list = [extensions.ControllerExtension(self,
'limits',
ExtendedLimitsController()),
]
return extension_list
|
apache-2.0
|
JPGlaser/Tycho
|
src/tycho/read.py
|
1
|
5586
|
# Python Classes/Functions used to Import Tycho Datasets
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# TO-DO: Add time back to the read state function for Tyler's code
# Importing Necessary System Packages
import math
import io
import os
import numpy as np
import matplotlib as plt
import random as rp
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
#from amuse.couple import multiples
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
# Import Pickle (the cPickle fallback is not needed under Python 3)
import pickle
# Tycho util import
from tycho import util
#from tycho import multiples2 as multiples
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def read_initial_state(file_prefix):
''' Reads in an initial state for the Tycho Module.
file_prefix: String Value for a Prefix to the Saved File
'''
# TODO: Also everything else in this function.
# First, Define the Directory where Initial State is Stored
file_dir = os.getcwd()+"/InitialState"
file_base = file_dir+"/"+file_prefix
# Second, Read the Master AMUSE Particle Set from a HDF5 File
file_format = "hdf5"
master_set = read_set_from_file(file_base+"_particles.hdf5", format=file_format, close_file=True)
# Third, unPickle the Initial Conditions Array
ic_file = open(file_base+"_ic.pkl", "rb")
ic_array = pickle.load(ic_file)
ic_file.close()
# Fourth, convert ic_array.total_smass and viral_radius from strings to floats
total_smass = float(ic_array.total_smass) | units.kg
viral_radius = float(ic_array.viral_radius) | units.m
# Fifth, Define the Master Set's Converter
converter = nbody_system.nbody_to_si(total_smass, viral_radius)
return master_set, ic_array, converter
# ------------------------------------ #
# RESTART FUNCTION #
# ------------------------------------ #
def read_state_from_file(restart_file, gravity_code, kep, SMALLN):
stars = read_set_from_file(restart_file+".stars.hdf5",'hdf5',version='2.0', close_file=True).copy()
stars_python = read_set_from_file(restart_file+".stars_python.hdf5",'hdf5',version='2.0', close_file=True).copy()
with open(restart_file + ".bookkeeping", "rb") as f:
bookkeeping = pickle.load(f)
f.close()
print(bookkeeping)
root_to_tree = {}
for root in stars:
if hasattr(root, 'components') and not root.components is None:
root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
gravity_code.particles.add_particles(stars)
# print bookkeeping['model_time']
# gravity_code.set_begin_time = bookkeeping['model_time']
multiples_code = multiples.Multiples(gravity_code, SMALLN, kep, gravity_constant=units.constants.G)
multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
multiples.root_index = bookkeeping['root_index']
multiples_code.root_to_tree = root_to_tree
# multiples_code.set_model_time = bookkeeping['model_time']
return stars_python, multiples_code
# ------------------------------------------ #
# RESTART CRASH FUNCTION #
# ------------------------------------------ #
def recover_crash(restart_file, gravity_code, kep, SMALLN):
# NEEDS SOME TENDER LOVE AND CARE
stars = read_set_from_file(restart_file+".stars.hdf5",'hdf5',version='2.0', close_file=True).copy()
stars_python = read_set_from_file(restart_file+".stars_python.hdf5",'hdf5',version='2.0', close_file=True).copy()
with open(restart_file + ".bookkeeping", "rb") as f:
bookkeeping = pickle.load(f)
f.close()
print(bookkeeping)
root_to_tree = {}
for root in stars:
if hasattr(root, 'components') and not root.components is None:
root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
#gravity_code.particles.add_particles(stars)
#print bookkeeping['model_time']
gravity_code.set_begin_time = bookkeeping['model_time']
multiples_code = multiples.Multiples(gravity_code, SMALLN, kep, gravity_constant=units.constants.G)
multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
multiples.root_index = bookkeeping['root_index']
multiples_code.root_to_tree = root_to_tree
#multiples_code.set_model_time = bookkeeping['model_time']
return bookkeeping['model_time'], multiples_code
|
mit
|
marcoantoniooliveira/labweb
|
oscar/lib/python2.7/site-packages/pygments/lexers/sql.py
|
70
|
23461
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.sql
~~~~~~~~~~~~~~~~~~~
Lexers for various SQL dialects and related interactive sessions.
Postgres specific lexers:
`PostgresLexer`
A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
lexer are:
- keywords and data types list parsed from the PG docs (run the
`_postgres_builtins` module to update them);
- Content of $-strings parsed using a specific lexer, e.g. the content
of a PL/Python function is parsed using the Python lexer;
- parse PG specific constructs: E-strings, $-strings, U&-strings,
different operators and punctuation.
`PlPgsqlLexer`
A lexer for the PL/pgSQL language. Adds a few specific construct on
top of the PG SQL lexer (such as <<label>>).
`PostgresConsoleLexer`
A lexer to highlight an interactive psql session:
- identifies the prompt and does its best to detect the end of command
in multiline statement where not all the lines are prefixed by a
prompt, telling them apart from the output;
- highlights errors in the output and notification levels;
- handles psql backslash commands.
The ``tests/examplefiles`` contains a few test files with data to be
parsed by these lexers.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer']
line_re = re.compile('.*?\n')
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
The lexer is chosen looking for a nearby LANGUAGE.
"""
l = None
m = language_re.match(lexer.text[match.end():match.end()+100])
if m is not None:
l = lexer._get_lexer(m.group(1))
else:
m = list(language_re.finditer(
lexer.text[max(0, match.start()-100):match.start()]))
if m:
l = lexer._get_lexer(m[-1].group(1))
if l:
yield (match.start(1), String, match.group(1))
for x in l.get_tokens_unprocessed(match.group(2)):
yield x
yield (match.start(3), String, match.group(3))
else:
yield (match.start(), String, match.group())
class PostgresBase(object):
"""Base class for Postgres-related lexers.
This is implemented as a mixin to avoid the Lexer metaclass kicking in.
This way the different lexers don't have a common Lexer ancestor. If they
had, _tokens could be created on this ancestor and not updated for the
other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming
seems to suggest that regexp lexers are not really subclassable.
"""
def get_tokens_unprocessed(self, text, *args):
# Have a copy of the entire text to be used by `language_callback`.
self.text = text
for x in super(PostgresBase, self).get_tokens_unprocessed(
text, *args):
yield x
def _get_lexer(self, lang):
if lang.lower() == 'sql':
return get_lexer_by_name('postgresql', **self.options)
tries = [ lang ]
if lang.startswith('pl'):
tries.append(lang[2:])
if lang.endswith('u'):
tries.append(lang[:-1])
if lang.startswith('pl') and lang.endswith('u'):
tries.append(lang[2:-1])
for l in tries:
try:
return get_lexer_by_name(l, **self.options)
except ClassNotFound:
pass
else:
# TODO: better logging
# print >>sys.stderr, "language not found:", lang
return None
class PostgresLexer(PostgresBase, RegexLexer):
"""
Lexer for the PostgreSQL dialect of SQL.
*New in Pygments 1.5.*
"""
name = 'PostgreSQL SQL dialect'
aliases = ['postgresql', 'postgres']
mimetypes = ['text/x-postgresql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(' + '|'.join([s.replace(" ", "\s+")
for s in DATATYPES + PSEUDO_TYPES])
+ r')\b', Name.Builtin),
(r'(' + '|'.join(KEYWORDS) + r')\b', Keyword),
(r'[+*/<>=~!@#%^&|`?-]+', Operator),
(r'::', Operator), # cast
(r'\$\d+', Name.Variable),
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
(r"(E|U&)?'(''|[^'])*'", String.Single),
(r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier
(r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
# psql variable in SQL
(r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
(r'[;:()\[\]\{\},\.]', Punctuation),
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
}
class PlPgsqlLexer(PostgresBase, RegexLexer):
"""
Handle the extra syntax in Pl/pgSQL language.
*New in Pygments 1.5.*
"""
name = 'PL/pgSQL'
aliases = ['plpgsql']
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
if pattern[1] == Keyword:
tokens['root'][i] = (
r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b',
Keyword)
del i
break
else:
assert 0, "SQL keywords not found"
# Add specific PL/pgSQL rules (before the SQL ones)
tokens['root'][:0] = [
(r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype
(r':=', Operator),
(r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label),
(r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict
]
class PsqlRegexLexer(PostgresBase, RegexLexer):
"""
Extend the PostgresLexer adding support specific for psql commands.
This is not a complete psql lexer yet as it lacks prompt support
and output rendering.
"""
name = 'PostgreSQL console - regexp based lexer'
aliases = [] # not public
flags = re.IGNORECASE
tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
tokens['root'].append(
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
tokens['psql-command'] = [
(r'\n', Text, 'root'),
(r'\s+', Text),
(r'\\[^\s]+', Keyword.Pseudo),
(r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
(r"'(''|[^'])*'", String.Single),
(r"`([^`])*`", String.Backtick),
(r"[^\s]+", String.Symbol),
]
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
re_psql_command = re.compile(r'\s*\\')
re_end_command = re.compile(r';\s*(--.*?)?$')
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
re_error = re.compile(r'(ERROR|FATAL):')
re_message = re.compile(
r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
class lookahead(object):
"""Wrap an iterator and allow pushing back an item."""
def __init__(self, x):
self.iter = iter(x)
self._nextitem = None
def __iter__(self):
return self
def send(self, i):
self._nextitem = i
return i
def next(self):
if self._nextitem is not None:
ni = self._nextitem
self._nextitem = None
return ni
return self.iter.next()
class PostgresConsoleLexer(Lexer):
"""
Lexer for psql sessions.
*New in Pygments 1.5.*
"""
name = 'PostgreSQL console (psql)'
aliases = ['psql', 'postgresql-console', 'postgres-console']
mimetypes = ['text/x-postgresql-psql']
def get_tokens_unprocessed(self, data):
sql = PsqlRegexLexer(**self.options)
lines = lookahead(line_re.findall(data))
# prompt-output cycle
while 1:
# consume the lines of the command: start with an optional prompt
# and continue until the end of command is detected
curcode = ''
insertions = []
while 1:
try:
line = lines.next()
except StopIteration:
# allow the emission of partially collected items
# the repl loop will be broken below
break
# Identify a shell prompt in case of psql commandline example
if line.startswith('$') and not curcode:
lexer = get_lexer_by_name('console', **self.options)
for x in lexer.get_tokens_unprocessed(line):
yield x
break
# Identify a psql prompt
mprompt = re_prompt.match(line)
if mprompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, mprompt.group())]))
curcode += line[len(mprompt.group()):]
else:
curcode += line
# Check if this is the end of the command
# TODO: better handle multiline comments at the end with
# a lexer with an external state?
if re_psql_command.match(curcode) \
or re_end_command.search(curcode):
break
# Emit the combined stream of command and prompt(s)
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
# Emit the output lines
out_token = Generic.Output
while 1:
line = lines.next()
mprompt = re_prompt.match(line)
if mprompt is not None:
# push the line back to have it processed by the prompt
lines.send(line)
break
mmsg = re_message.match(line)
if mmsg is not None:
if mmsg.group(1).startswith("ERROR") \
or mmsg.group(1).startswith("FATAL"):
out_token = Generic.Error
yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
yield (mmsg.start(2), out_token, mmsg.group(2))
else:
yield (0, out_token, line)
class SqlLexer(RegexLexer):
"""
Lexer for Structured Query Language. Currently, this lexer does
not recognize any special syntax except ANSI SQL.
"""
name = 'SQL'
aliases = ['sql']
filenames = ['*.sql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
r'SYSTEM_USER|TABLE|TABLE_NAME|TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
Name.Builtin),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'[0-9]+', Number.Integer),
# TODO: Backslash escapes?
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class MySqlLexer(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
# TODO: add backslash escapes
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r"`(``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
r'|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
# TODO: this list is not complete
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
*New in Pygments 0.11.*
"""
name = 'sqlite3con'
aliases = ['sqlite3']
filenames = ['*.sqlite3-console']
mimetypes = ['text/x-sqlite3-console']
def get_tokens_unprocessed(self, data):
sql = SqlLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(data):
line = match.group()
if line.startswith('sqlite> ') or line.startswith(' ...> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:8])]))
curcode += line[8:]
else:
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('SQL error: '):
yield (match.start(), Generic.Traceback, line)
else:
yield (match.start(), Generic.Output, line)
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
|
bsd-3-clause
|
sparkslabs/kamaelia_
|
Sketches/TG/shard/PygameComponentShard.py
|
12
|
5183
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Shard import *
from ComponentShard import *
from LoopShard import *
from InitShard import initShard
from FunctionShard import functionShard
"""
Example to recreate Sketches/MPS/Shard/Shards.py with code generation
setup, i.e. class with the same functionality as ShardedPygameAppChassis
Current test (MagnaGen.py) successfully generates and runs MagnaDoodle
using this as a base
"""
class pygameComponentShard(componentShard):
"""
Generates a pygame kamaelia component if all required methods
and shards are supplied, else raises a DependencyError
Arguments:
name = string of component name, will be used as class
name. If None, an auto-generated name will be
used.
shards = the methods and inline shards to be added into the class,
as function objects or shard objects. At minimum these
must include this class's required methods and ishards,
else a DependencyError will be raised. Objects must be
named as the method or ishard they are supplying.
Any additional shards, e.g. extra methods, will be included
'as is' in the body of the class
"""
# use shard classmethods to state minimum required shards to generate this
shard.addReqMethods("blitToSurface", "waitBox", "drawBG", "addListenEvent" )
shard.addReqIShards("ShutdownHandler", "LoopOverPygameEvents", "RequestDisplay",
"__INIT__", "GrabDisplay", "SetEventOptions" )
# default information supplied by this class
sclasses = ["Axon.Component.component"]
dstr = 'Auto-generated pygame component'
inbxs = { "inbox" : "Receive events from PygameDisplay",
"control" : "For shutdown messages",
"callback" : "Receive callbacks from PygameDisplay"
}
outbxs = { "outbox" : "not used",
"signal" : "For shutdown messages",
"display_signal" : "Outbox used for communicating to the display surface" }
# default initialisation parameters
initargs = {}
initargs['cmpname'] = None
initargs['methods'] = []
initargs['ishards'] = {}
def __init__(self, name = None, shards = []):
mshards = []
ishards = {}
for s in shards:
if isinstance(s, shard):
sname = s.name
else: # assume function
sname = s.func_name
if sname in self.requiredIShards:
ishards[sname] = s
else:
mshards.append(s)
mshards = self.makeMethodShards(mshards)
self.checkDependencies(mshards, ishards)
# create default methods and add in shards
compInit = initShard(clsname = name, exkwarg = 'argd',
shards = [ishards['__INIT__']])
waitLoop = forShard(name = 'wait', inVar = r'self.waitBox("callback")',
shards = [['yield 1\n']])
mainLoop = whileShard(name = 'mainLoop', condition = 'not done',
shards = [ishards['ShutdownHandler'],
ishards['LoopOverPygameEvents'],
['self.pause()\n', 'yield 1\n']])
compMain = functionShard(funcname = "main", args = ['self'],
shards = [ishards["RequestDisplay"], waitLoop,
ishards['GrabDisplay'],
['self.drawBG()\n', 'self.blitToSurface()\n'],
ishards['SetEventOptions'], ['done = False\n'],
mainLoop])
# construct pygame component with default and supplied shards
componentShard.__init__(self, name, superclasses = self.sclasses,
docstring = self.dstr, inboxes = self.inbxs,
outboxes = self.outbxs,
shards = [compInit] + mshards + [compMain])
|
apache-2.0
|
rickhurst/Django-non-rel-blog
|
django/conf/locale/pl/formats.py
|
238
|
1288
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' '
NUMBER_GROUPING = 3
|
bsd-3-clause
|
jazkarta/edx-platform-for-isc
|
common/lib/xmodule/xmodule/annotator_token.py
|
211
|
1542
|
"""
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
'''
Return a token for the backend of annotations.
It uses the course id to retrieve a variable that contains the secret
token found in inheritance.py. It also contains information of when
the token was issued. This will be stored with the user along with
the id for identification purposes in the backend.
'''
# the following five lines of code allow you to include the default timezone in the iso format
# for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
# uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
# federated system in the annotation backend server
custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
newtoken = create_token(secret, custom_data)
return newtoken
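# Illustrative note (not part of the original module): the payload signed
# into the token is {"issuedAt": <local ISO time with UTC offset>,
# "consumerKey": secret, "userId": userid, "ttl": 86400}; the annotation
# backend uses consumerKey/userId to identify the user and ttl (in seconds)
# to expire the token.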
|
agpl-3.0
|
puckipedia/youtube-dl
|
youtube_dl/extractor/restudy.py
|
146
|
1155
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class RestudyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?restudy\.dk/video/play/id/(?P<id>[0-9]+)'
_TEST = {
'url': 'https://www.restudy.dk/video/play/id/1637',
'info_dict': {
'id': '1637',
'ext': 'flv',
'title': 'Leiden-frosteffekt',
'description': 'Denne video er et eksperiment med flydende kvælstof.',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
formats = self._extract_smil_formats(
'https://www.restudy.dk/awsmedia/SmilDirectory/video_%s.xml' % video_id,
video_id)
return {
'id': video_id,
'title': title,
'description': description,
'formats': formats,
}
|
unlicense
|
appleseedhq/cortex
|
python/IECoreNuke/Menus.py
|
5
|
2131
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import IECoreNuke
def addOpCreationCommands( menu ) :
loader = IECore.ClassLoader.defaultOpLoader()
for c in loader.classNames() :
menuPath = "/".join( [ IECore.CamelCase.toSpaced( x ) for x in c.split( "/" ) ] )
menu.addCommand( menuPath, IECore.curry( IECoreNuke.FnOpHolder.create, os.path.basename( c ), c ) )
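# Hedged sketch (not part of the original module): one way the function above
# could be hooked up inside a running Nuke session; the "Cortex/Ops" menu path
# is illustrative and assumes Nuke's `nuke.menu()` API is importable.
def __demoAddOpCreationMenu() :
	import nuke
	menu = nuke.menu( "Nuke" ).addMenu( "Cortex/Ops" )
	addOpCreationCommands( menu )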
|
bsd-3-clause
|
hdinsight/hue
|
desktop/core/ext-py/Paste-2.0.1/paste/auth/cookie.py
|
50
|
16379
|
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Cookie "Saved" Authentication
This authentication middleware saves the current REMOTE_USER,
REMOTE_SESSION, and any other environment variables specified in a
cookie so that it can be retrieved during the next request without
requiring re-authentication. This uses a session cookie on the client
side (so it goes away when the user closes their window) and does
server-side expiration.
Following is a very simple example where a form is presented asking for
a user name (no actual checking), and dummy session identifier (perhaps
corresponding to a database session id) is stored in the cookie.
::
>>> from paste.httpserver import serve
>>> from paste.fileapp import DataApp
>>> from paste.httpexceptions import *
>>> from paste.auth.cookie import AuthCookieHandler
>>> from paste.wsgilib import parse_querystring
>>> def testapp(environ, start_response):
... user = dict(parse_querystring(environ)).get('user','')
... if user:
... environ['REMOTE_USER'] = user
... environ['REMOTE_SESSION'] = 'a-session-id'
... if environ.get('REMOTE_USER'):
... page = '<html><body>Welcome %s (%s)</body></html>'
... page %= (environ['REMOTE_USER'], environ['REMOTE_SESSION'])
... else:
... page = ('<html><body><form><input name="user" />'
... '<input type="submit" /></form></body></html>')
... return DataApp(page, content_type="text/html")(
... environ, start_response)
>>> serve(AuthCookieHandler(testapp))
serving on...
"""
import hmac, base64, random, six, time, warnings
try:
from hashlib import sha1
except ImportError:
# NOTE: We have to use the callable with hashlib (hashlib.sha1),
# otherwise hmac only accepts the sha module object itself
import sha as sha1
from paste.request import get_cookies
def make_time(value):
return time.strftime("%Y%m%d%H%M", time.gmtime(value))
_signature_size = len(hmac.new(b'x', b'x', sha1).digest())
_header_size = _signature_size + len(make_time(time.time()))
# @@: Should this be using urllib.quote?
# build encode/decode functions to safely pack away values
_encode = [('\\', '\\x5c'), ('"', '\\x22'),
('=', '\\x3d'), (';', '\\x3b')]
_decode = [(v, k) for (k, v) in _encode]
_decode.reverse()
def encode(s, sublist = _encode):
return six.moves.reduce((lambda a, b: a.replace(b[0], b[1])), sublist, str(s))
decode = lambda s: encode(s, _decode)
class CookieTooLarge(RuntimeError):
def __init__(self, content, cookie):
        RuntimeError.__init__(self, "Signed cookie exceeds maximum size of 4096")
self.content = content
self.cookie = cookie
_all_chars = ''.join([chr(x) for x in range(0, 255)])
def new_secret():
""" returns a 64 byte secret """
secret = ''.join(random.sample(_all_chars, 64))
if six.PY3:
secret = secret.encode('utf8')
return secret
class AuthCookieSigner(object):
"""
    save/restore ``environ`` entries via digitally signed cookie
This class converts content into a timed and digitally signed
cookie, as well as having the facility to reverse this procedure.
If the cookie, after the content is encoded and signed exceeds the
maximum length (4096), then CookieTooLarge exception is raised.
The timeout of the cookie is handled on the server side for a few
reasons. First, if a 'Expires' directive is added to a cookie, then
the cookie becomes persistent (lasting even after the browser window
has closed). Second, the user's clock may be wrong (perhaps
intentionally). The timeout is specified in minutes; and expiration
date returned is rounded to one second.
Constructor Arguments:
``secret``
        This is a secret key if you want to synchronize your keys so
that the cookie will be good across a cluster of computers.
It is recommended via the HMAC specification (RFC 2104) that
the secret key be 64 bytes since this is the block size of
the hashing. If you do not provide a secret key, a random
one is generated each time you create the handler; this
should be sufficient for most cases.
``timeout``
This is the time (in minutes) from which the cookie is set
to expire. Note that on each request a new (replacement)
cookie is sent, hence this is effectively a session timeout
parameter for your entire cluster. If you do not provide a
timeout, it is set at 30 minutes.
``maxlen``
This is the maximum size of the *signed* cookie; hence the
actual content signed will be somewhat less. If the cookie
goes over this size, a ``CookieTooLarge`` exception is
raised so that unexpected handling of cookies on the client
side are avoided. By default this is set at 4k (4096 bytes),
which is the standard cookie size limit.
"""
def __init__(self, secret = None, timeout = None, maxlen = None):
self.timeout = timeout or 30
if isinstance(timeout, six.string_types):
raise ValueError(
"Timeout must be a number (minutes), not a string (%r)"
% timeout)
self.maxlen = maxlen or 4096
self.secret = secret or new_secret()
def sign(self, content):
"""
Sign the content returning a valid cookie (that does not
need to be escaped and quoted). The expiration of this
cookie is handled server-side in the auth() function.
"""
timestamp = make_time(time.time() + 60*self.timeout)
if six.PY3:
content = content.encode('utf8')
timestamp = timestamp.encode('utf8')
cookie = base64.encodestring(
hmac.new(self.secret, content, sha1).digest() +
timestamp +
content)
cookie = cookie.replace(b"/", b"_").replace(b"=", b"~")
cookie = cookie.replace(b'\n', b'').replace(b'\r', b'')
if len(cookie) > self.maxlen:
raise CookieTooLarge(content, cookie)
return cookie
def auth(self, cookie):
"""
        Authenticate the cookie using the signature, verify that it
has not expired; and return the cookie's content
"""
decode = base64.decodestring(
cookie.replace("_", "/").replace("~", "="))
signature = decode[:_signature_size]
expires = decode[_signature_size:_header_size]
content = decode[_header_size:]
if signature == hmac.new(self.secret, content, sha1).digest():
if int(expires) > int(make_time(time.time())):
return content
else:
# This is the normal case of an expired cookie; just
# don't bother doing anything here.
pass
else:
# This case can happen if the server is restarted with a
# different secret; or if the user's IP address changed
# due to a proxy. However, it could also be a break-in
# attempt -- so should it be reported?
pass
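# Hedged sketch (not part of the original module): a minimal look at what the
# signer above produces; the helper name and the all-'x' secret are
# illustrative only. signer.auth(cookie_value) reverses the procedure by
# checking the HMAC signature and the embedded expiry timestamp.
def _demo_signer():
    signer = AuthCookieSigner(secret=b'x' * 64, timeout=30)
    cookie = signer.sign("REMOTE_USER=alice")
    if six.PY3:
        # on Python 3 the signed cookie comes back as bytes
        cookie = cookie.decode('utf8')
    return cookie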
class AuthCookieEnviron(list):
"""
a list of environment keys to be saved via cookie
An instance of this object, found at ``environ['paste.auth.cookie']``
lists the `environ` keys that were restored from or will be added
    to the digitally signed cookie. This object can be accessed from an
`environ` variable by using this module's name.
"""
def __init__(self, handler, scanlist):
list.__init__(self, scanlist)
self.handler = handler
def append(self, value):
if value in self:
return
list.append(self, str(value))
class AuthCookieHandler(object):
"""
the actual handler that should be put in your middleware stack
This middleware uses cookies to stash-away a previously authenticated
user (and perhaps other variables) so that re-authentication is not
needed. This does not implement sessions; and therefore N servers
    can be synchronized to accept the same saved authentication if they
all use the same cookie_name and secret.
By default, this handler scans the `environ` for the REMOTE_USER
and REMOTE_SESSION key; if found, it is stored. It can be
configured to scan other `environ` keys as well -- but be careful
not to exceed 2-3k (so that the encoded and signed cookie does not
exceed 4k). You can ask it to handle other environment variables
by doing:
``environ['paste.auth.cookie'].append('your.environ.variable')``
Constructor Arguments:
``application``
This is the wrapped application which will have access to
the ``environ['REMOTE_USER']`` restored by this middleware.
``cookie_name``
The name of the cookie used to store this content, by default
it is ``PASTE_AUTH_COOKIE``.
``scanlist``
This is the initial set of ``environ`` keys to
        save/restore to the signed cookie. By default it consists
only of ``REMOTE_USER`` and ``REMOTE_SESSION``; any tuple
or list of environment keys will work. However, be
careful, as the total saved size is limited to around 3k.
``signer``
This is the signer object used to create the actual cookie
values, by default, it is ``AuthCookieSigner`` and is passed
the remaining arguments to this function: ``secret``,
``timeout``, and ``maxlen``.
At this time, each cookie is individually signed. To store more
    than 4k of data, it is possible to sub-class this object to
provide different ``environ_name`` and ``cookie_name``
"""
environ_name = 'paste.auth.cookie'
cookie_name = 'PASTE_AUTH_COOKIE'
signer_class = AuthCookieSigner
environ_class = AuthCookieEnviron
def __init__(self, application, cookie_name=None, scanlist=None,
signer=None, secret=None, timeout=None, maxlen=None):
if not signer:
signer = self.signer_class(secret, timeout, maxlen)
self.signer = signer
self.scanlist = scanlist or ('REMOTE_USER','REMOTE_SESSION')
self.application = application
self.cookie_name = cookie_name or self.cookie_name
def __call__(self, environ, start_response):
if self.environ_name in environ:
raise AssertionError("AuthCookie already installed!")
scanlist = self.environ_class(self, self.scanlist)
jar = get_cookies(environ)
if self.cookie_name in jar:
content = self.signer.auth(jar[self.cookie_name].value)
if content:
for pair in content.split(";"):
(k, v) = pair.split("=")
k = decode(k)
if k not in scanlist:
scanlist.append(k)
if k in environ:
continue
environ[k] = decode(v)
if 'REMOTE_USER' == k:
environ['AUTH_TYPE'] = 'cookie'
environ[self.environ_name] = scanlist
if "paste.httpexceptions" in environ:
warnings.warn("Since paste.httpexceptions is hooked in your "
"processing chain before paste.auth.cookie, if an "
"HTTPRedirection is raised, the cookies this module sets "
"will not be included in your response.\n")
def response_hook(status, response_headers, exc_info=None):
"""
Scan the environment for keys specified in the scanlist,
            pack up their values, sign the content and issue a cookie.
"""
scanlist = environ.get(self.environ_name)
assert scanlist and isinstance(scanlist, self.environ_class)
content = []
for k in scanlist:
v = environ.get(k)
if v is not None:
if type(v) is not str:
raise ValueError(
"The value of the environmental variable %r "
"is not a str (only str is allowed; got %r)"
% (k, v))
content.append("%s=%s" % (encode(k), encode(v)))
if content:
content = ";".join(content)
content = self.signer.sign(content)
if six.PY3:
content = content.decode('utf8')
cookie = '%s=%s; Path=/;' % (self.cookie_name, content)
if 'https' == environ['wsgi.url_scheme']:
cookie += ' secure;'
response_headers.append(('Set-Cookie', cookie))
return start_response(status, response_headers, exc_info)
return self.application(environ, response_hook)
middleware = AuthCookieHandler
# Paste Deploy entry point:
def make_auth_cookie(
app, global_conf,
# Should this get picked up from global_conf somehow?:
cookie_name='PASTE_AUTH_COOKIE',
scanlist=('REMOTE_USER', 'REMOTE_SESSION'),
# signer cannot be set
secret=None,
timeout=30,
maxlen=4096):
"""
This middleware uses cookies to stash-away a previously
authenticated user (and perhaps other variables) so that
re-authentication is not needed. This does not implement
    sessions; and therefore N servers can be synchronized to accept the
same saved authentication if they all use the same cookie_name and
secret.
By default, this handler scans the `environ` for the REMOTE_USER
and REMOTE_SESSION key; if found, it is stored. It can be
configured to scan other `environ` keys as well -- but be careful
not to exceed 2-3k (so that the encoded and signed cookie does not
exceed 4k). You can ask it to handle other environment variables
by doing:
``environ['paste.auth.cookie'].append('your.environ.variable')``
Configuration:
``cookie_name``
The name of the cookie used to store this content, by
default it is ``PASTE_AUTH_COOKIE``.
``scanlist``
This is the initial set of ``environ`` keys to
        save/restore to the signed cookie. By default it consists
only of ``REMOTE_USER`` and ``REMOTE_SESSION``; any
space-separated list of environment keys will work.
However, be careful, as the total saved size is limited to
around 3k.
``secret``
The secret that will be used to sign the cookies. If you
don't provide one (and none is set globally) then a random
secret will be created. Each time the server is restarted
a new secret will then be created and all cookies will
become invalid! This can be any string value.
``timeout``
The time to keep the cookie, expressed in minutes. This
is handled server-side, so a new cookie with a new timeout
is added to every response.
``maxlen``
The maximum length of the cookie that is sent (default 4k,
which is a typical browser maximum)
"""
if isinstance(scanlist, six.string_types):
scanlist = scanlist.split()
if secret is None and global_conf.get('secret'):
secret = global_conf['secret']
try:
timeout = int(timeout)
except ValueError:
raise ValueError('Bad value for timeout (must be int): %r'
% timeout)
try:
maxlen = int(maxlen)
except ValueError:
raise ValueError('Bad value for maxlen (must be int): %r'
% maxlen)
return AuthCookieHandler(
app, cookie_name=cookie_name, scanlist=scanlist,
secret=secret, timeout=timeout, maxlen=maxlen)
__all__ = ['AuthCookieHandler', 'AuthCookieSigner', 'AuthCookieEnviron']
if "__main__" == __name__:
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
apache-2.0
|
gcarranza/couchdb-python
|
couchdb/design.py
|
7
|
8182
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Utility code for managing design documents."""
from copy import deepcopy
from inspect import getsource
from itertools import groupby
from operator import attrgetter
from textwrap import dedent
from types import FunctionType
__all__ = ['ViewDefinition']
__docformat__ = 'restructuredtext en'
class ViewDefinition(object):
r"""Definition of a view stored in a specific design document.
An instance of this class can be used to access the results of the view,
as well as to keep the view definition in the design document up to date
with the definition in the application code.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
>>> view = ViewDefinition('tests', 'all', '''function(doc) {
... emit(doc._id, null);
... }''')
>>> view.get_doc(db)
The view is not yet stored in the database, in fact, design doc doesn't
even exist yet. That can be fixed using the `sync` method:
>>> view.sync(db) #doctest: +ELLIPSIS
[(True, u'_design/tests', ...)]
>>> design_doc = view.get_doc(db)
>>> design_doc #doctest: +ELLIPSIS
<Document u'_design/tests'@u'...' {...}>
>>> print(design_doc['views']['all']['map'])
function(doc) {
emit(doc._id, null);
}
If you use a Python view server, you can also use Python functions instead
of code embedded in strings:
>>> def my_map(doc):
... yield doc['somekey'], doc['somevalue']
>>> view = ViewDefinition('test2', 'somename', my_map, language='python')
>>> view.sync(db) #doctest: +ELLIPSIS
[(True, u'_design/test2', ...)]
>>> design_doc = view.get_doc(db)
>>> design_doc #doctest: +ELLIPSIS
<Document u'_design/test2'@u'...' {...}>
>>> print(design_doc['views']['somename']['map'])
def my_map(doc):
yield doc['somekey'], doc['somevalue']
Use the static `sync_many()` method to create or update a collection of
views in the database in an atomic and efficient manner, even across
different design documents.
>>> del server['python-tests']
"""
def __init__(self, design, name, map_fun, reduce_fun=None,
language='javascript', wrapper=None, options=None,
**defaults):
"""Initialize the view definition.
Note that the code in `map_fun` and `reduce_fun` is automatically
dedented, that is, any common leading whitespace is removed from each
line.
:param design: the name of the design document
:param name: the name of the view
:param map_fun: the map function code
:param reduce_fun: the reduce function code (optional)
:param language: the name of the language used
:param wrapper: an optional callable that should be used to wrap the
result rows
:param options: view specific options (e.g. {'collation':'raw'})
"""
if design.startswith('_design/'):
design = design[8:]
self.design = design
self.name = name
if isinstance(map_fun, FunctionType):
map_fun = _strip_decorators(getsource(map_fun).rstrip())
self.map_fun = dedent(map_fun.lstrip('\n'))
if isinstance(reduce_fun, FunctionType):
reduce_fun = _strip_decorators(getsource(reduce_fun).rstrip())
if reduce_fun:
reduce_fun = dedent(reduce_fun.lstrip('\n'))
self.reduce_fun = reduce_fun
self.language = language
self.wrapper = wrapper
self.options = options
self.defaults = defaults
def __call__(self, db, **options):
"""Execute the view in the given database.
:param db: the `Database` instance
:param options: optional query string parameters
:return: the view results
:rtype: `ViewResults`
"""
wrapper = options.pop('wrapper', self.wrapper)
merged_options = self.defaults.copy()
merged_options.update(options)
return db.view('/'.join([self.design, self.name]),
wrapper=wrapper, **merged_options)
def __repr__(self):
return '<%s %r>' % (type(self).__name__, '/'.join([
'_design', self.design, '_view', self.name
]))
def get_doc(self, db):
"""Retrieve and return the design document corresponding to this view
definition from the given database.
:param db: the `Database` instance
:return: a `client.Document` instance, or `None` if the design document
does not exist in the database
:rtype: `Document`
"""
return db.get('_design/%s' % self.design)
def sync(self, db):
"""Ensure that the view stored in the database matches the view defined
by this instance.
:param db: the `Database` instance
"""
return type(self).sync_many(db, [self])
@staticmethod
def sync_many(db, views, remove_missing=False, callback=None):
"""Ensure that the views stored in the database that correspond to a
given list of `ViewDefinition` instances match the code defined in
those instances.
This function might update more than one design document. This is done
using the CouchDB bulk update feature to ensure atomicity of the
operation.
:param db: the `Database` instance
:param views: a sequence of `ViewDefinition` instances
:param remove_missing: whether views found in a design document that
are not found in the list of `ViewDefinition`
instances should be removed
:param callback: a callback function that is invoked when a design
document gets updated; the callback gets passed the
design document as only parameter, before that doc
has actually been saved back to the database
"""
docs = []
views = sorted(views, key=attrgetter('design'))
for design, views in groupby(views, key=attrgetter('design')):
doc_id = '_design/%s' % design
doc = db.get(doc_id, {'_id': doc_id})
orig_doc = deepcopy(doc)
languages = set()
missing = list(doc.get('views', {}).keys())
for view in views:
funcs = {'map': view.map_fun}
if view.reduce_fun:
funcs['reduce'] = view.reduce_fun
if view.options:
funcs['options'] = view.options
doc.setdefault('views', {})[view.name] = funcs
languages.add(view.language)
if view.name in missing:
missing.remove(view.name)
if remove_missing and missing:
for name in missing:
del doc['views'][name]
elif missing and 'language' in doc:
languages.add(doc['language'])
if len(languages) > 1:
                raise ValueError('Found different language views in one '
                                 'design document (%r)' % list(languages))
doc['language'] = list(languages)[0]
if doc != orig_doc:
if callback is not None:
callback(doc)
docs.append(doc)
return db.update(docs)
def _strip_decorators(code):
retval = []
beginning = True
for line in code.splitlines():
if beginning and not line.isspace():
if line.lstrip().startswith('@'):
continue
beginning = False
retval.append(line)
return '\n'.join(retval)
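# Hedged sketch (not part of the original module): defining two views in the
# same design document and pushing them to the server atomically with
# sync_many(); the database handle `db` and the view names are illustrative.
def _demo_sync_many(db):
    by_type = ViewDefinition('indexes', 'by_type',
                             'function(doc) { emit(doc.type, null); }')
    by_date = ViewDefinition('indexes', 'by_date',
                             'function(doc) { emit(doc.date, null); }')
    return ViewDefinition.sync_many(db, [by_type, by_date])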
|
bsd-3-clause
|
thor27/multimonitor-browser
|
detect_monitors.py
|
1
|
5052
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Detect Screens
Copyright (C) 2016 Thomaz de Oliveira dos Reis <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import gtk
import sys
import argparse
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class Kiosk(object):
__NUM_KIOSKS = 0
@staticmethod
def push_kiosk():
Kiosk.__NUM_KIOSKS += 1
return Kiosk.__NUM_KIOSKS
@staticmethod
def pop_kiosk():
Kiosk.__NUM_KIOSKS -= 1
return Kiosk.__NUM_KIOSKS
def __init__(self, monitor, disable_close=True, text="Hello World"):
self.disable_close = disable_close
self.text = text
self.create_window()
self.move_to_monitor(monitor)
self.fullscreen()
Kiosk.push_kiosk()
def create_window(self):
self.window = gtk.Window()
self.screen = self.window.get_screen()
self.label = gtk.Label()
self.label.set_markup("<span weight='bold' size='100000'>{0}</span>".format(self.text))
self.label.set_alignment(xalign=0.05, yalign=0.05)
self.window.add(self.label)
self.window.connect("delete-event", self.on_close)
self.window.show_all()
def fullscreen(self):
self.window.fullscreen()
self.window.show_all()
self.window.set_resizable(False)
def move_to_monitor(self, id):
monitor = self.screen.get_monitor_geometry(id)
self.window.move(monitor.x, monitor.y)
self.window.set_size_request(monitor.width, monitor.height)
def on_close(self, *args, **kwargs):
if self.disable_close:
return True
if not Kiosk.pop_kiosk():
gtk.main_quit()
def get_text(parent, message, default=''):
"""
Display a dialog with a text entry.
Returns the text, or None if canceled.
"""
d = gtk.MessageDialog(parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
message)
entry = gtk.Entry()
entry.set_text(default)
entry.show()
def filter_numbers(entry, *args):
text = entry.get_text().strip()
entry.set_text(''.join([i for i in text if i.isdigit()]))
entry.connect('changed', filter_numbers)
d.vbox.pack_end(entry)
entry.connect('activate', lambda _: d.response(gtk.RESPONSE_OK))
d.set_default_response(gtk.RESPONSE_OK)
r = d.run()
text = entry.get_text().decode('utf8')
d.destroy()
if r == gtk.RESPONSE_OK:
return text
else:
return None
def parse_args(args):
parser = argparse.ArgumentParser(description='Detect screens')
parser.add_argument('urls', metavar='URL', type=str, nargs='+',
help='All urls you want to show')
parser.add_argument('-m', '--messages', type=str, nargs='+', dest='messages',
help='Text of each message to ask for monitor URL')
parser.add_argument('-d', '--default-url', type=str, dest='default_url', default='about:blank',
help='Default URL to use (Default: about:blank)')
parser.add_argument('-c', '--allow-close', dest='disable_close', action='store_false',
help='Allow browser window to be closed')
return parser.parse_args(args)
def get_num_monitors():
screen = gtk.gdk.Screen()
return screen.get_n_monitors()
def main():
args = parse_args(sys.argv[1:])
num_monitors = get_num_monitors()
for monitor in range(num_monitors):
Kiosk(
monitor,
text="{0}".format(monitor),
disable_close=args.disable_close,
)
result = [args.default_url for x in range(num_monitors)]
for pos, url in enumerate(args.urls[:num_monitors]):
message = args.messages[pos] if args.messages and pos < len(args.messages) else url
while True:
user_input = get_text(None, message)
if user_input:
monitor_number = int(user_input)
if monitor_number < num_monitors and result[monitor_number] == args.default_url:
break
result[monitor_number] = url
return result
if __name__ == '__main__':
print(" ".join(main()))
|
gpl-3.0
|
sdu-cfei/modest-py
|
modestpy/estim/ga/individual.py
|
1
|
6300
|
"""
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import logging
import random
import pandas as pd
import numpy as np
import copy
from modestpy.estim.error import calc_err
class Individual(object):
def __init__(self, est_objects, population, genes=None,
use_init_guess=False, ftype='NRMSE'):
"""
Individual can be initialized using `genes` OR initial guess
in `est_objects` (genes are inferred from parameters and vice versa).
Otherwise, random genes are assumed.
:param est_objects: List of EstPar objects with estimated parameters
:type est_objects: list(EstPar)
:param Population population: Population instance
:param genes: Genes (can be also inferred from `parameters`)
:type genes: dict(str: float)
:param bool use_init_guess: If True, use initial guess from
`est_objects`
:param str ftype: Cost function type, 'RMSE' or 'NRMSE'
"""
self.logger = logging.getLogger(type(self).__name__)
# Reference to the population object
self.population = population
# Assign variables shared across the population
self.ideal = population.ideal
self.model = population.model
# Cost function type
self.ftype = ftype
# Deep copy EstPar instances to avoid sharing between individuals
self.est_par_objects = copy.deepcopy(est_objects)
# Generate genes
if not genes and not use_init_guess:
# Generate random genes
est_names = Individual._get_names(self.est_par_objects)
self.genes = Individual._random_genes(est_names)
elif genes and not use_init_guess:
# Use provided genes
self.genes = copy.deepcopy(genes)
elif use_init_guess and not genes:
# Infer genes from parameters
self.genes = dict()
for p in self.est_par_objects:
self.genes[p.name] = (p.value - p.lo) / (p.hi - p.lo)
assert self.genes[p.name] >= 0. and self.genes[p.name] <= 1., \
'Initial guess outside the bounds'
else:
msg = 'Either genes or parameters have to be None'
self.logger.error(msg)
raise ValueError(msg)
# Update parameters
self._update_parameters()
# Individual result
self.result = None
self.error = None
# Main methods ------------------------------
def calculate(self):
# Just in case, individual result and error
# are cleared before simulation
self.reset()
# Important to set estimated parameters just before simulation,
# because all individuals share the same model instance
self.model.set_param(self.est_par_df)
# Simulation
self.result = self.model.simulate()
# Make sure the returned result is not empty
assert self.result.empty is False, \
'Empty result returned from simulation... (?)'
# Calculate error
self.logger.debug("Calculating error ({}) in individual {}"
.format(self.ftype, self.genes))
self.error = calc_err(self.result, self.ideal, ftype=self.ftype)
def reset(self):
self.result = None
self.error = None
self.est_par_objects = copy.deepcopy(self.est_par_objects)
def set_gene(self, name, value):
self.genes[name] = value
self._update_parameters()
def get_gene(self, name):
return self.genes[name]
def get_sorted_gene_names(self):
return sorted(self.genes.keys())
def get_estimates(self, as_dict=False):
"""
        :param as_dict: boolean (True to get a dictionary instead of a DataFrame)
:return: DataFrame with estimated parameters
"""
df = pd.DataFrame()
for par in self.est_par_objects:
df[par.name] = np.array([par.value])
if as_dict:
return df.to_dict()
else:
return df
def get_estimates_and_error(self):
estimates = self.get_estimates()
estimates['_error_'] = self.error['tot']
return estimates
def get_clone(self):
clone = Individual(self.est_par_objects, self.population,
self.genes, ftype=self.ftype)
return clone
# Private methods ---------------------------
def _update_parameters(self):
# Calculate parameter values
self.est_par_objects = self._calc_parameters(self.genes)
# Convert estimated parameters to dataframe
self.est_par_df = Individual._est_pars_2_df(self.est_par_objects)
@staticmethod
def _est_pars_2_df(est_pars):
df = pd.DataFrame()
for p in est_pars:
df[p.name] = np.array([p.value])
return df
def _calc_parameters(self, genes):
"""
Calculates parameters based on genes and limits.
        :return: list(EstPar) with updated parameter values
"""
for par in self.est_par_objects:
gene = genes[par.name]
par.value = par.lo + gene * (par.hi - par.lo)
return self.est_par_objects
@staticmethod
def _random_genes(par_names):
"""
Generates random genes.
:return: dict(str: float)
"""
genes = dict()
for par in par_names:
g = 0
while g == 0: # Because random.random() can return 0
g = random.random()
genes[par] = g
return genes
@staticmethod
def _get_names(est_params):
names = list()
for par in est_params:
names.append(par.name)
return names
    # Overridden methods --------------------------
def __str__(self):
s = 'Individual ('
for par in self.est_par_objects:
s += par.name + '={0:.3f}'.format(par.value)
s += ', '
# Delete trailing comma
s = s[:-2]
s += '), err='
if self.error:
s += '{:.4f} '.format(self.error['tot'])
else:
s += 'None'
return s
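# Hedged sketch (not part of the original module): the gene <-> parameter
# mapping used by Individual is a plain linear rescaling between each
# parameter's bounds; the helper names and sample bounds are illustrative.
def _gene_to_value(gene, lo, hi):
    return lo + gene * (hi - lo)
def _value_to_gene(value, lo, hi):
    return (value - lo) / (hi - lo)
# e.g. _gene_to_value(0.25, 0.0, 10.0) == 2.5 and _value_to_gene(2.5, 0.0, 10.0) == 0.25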
|
bsd-2-clause
|
tklengyel/dionaea
|
modules/python/scripts/smb/include/asn1/mib.py
|
11
|
5595
|
#********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2010 Markus Koetter
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact [email protected]
#*
#*******************************************************************************/
#* This file was part of Scapy
#* See http://www.secdev.org/projects/scapy for more informations
#* Copyright (C) Philippe Biondi <[email protected]>
#* This program is published under a GPLv2 license
#*******************************************************************************
import re
from glob import glob
from scapy.dadict import DADict,fixname
from scapy.config import conf
from scapy.utils import do_graph
#################
## MIB parsing ##
#################
_mib_re_integer = re.compile("^[0-9]+$")
_mib_re_both = re.compile("^([a-zA-Z_][a-zA-Z0-9_-]*)\(([0-9]+)\)$")
_mib_re_oiddecl = re.compile("$\s*([a-zA-Z0-9_-]+)\s+OBJECT([^:\{\}]|\{[^:]+\})+::=\s*\{([^\}]+)\}",re.M)
_mib_re_strings = re.compile('"[^"]*"')
_mib_re_comments = re.compile('--.*(\r|\n)')
class MIBDict(DADict):
def _findroot(self, x):
if x.startswith("."):
x = x[1:]
if not x.endswith("."):
x += "."
max=0
root="."
for k in list(self.keys()):
if x.startswith(self[k]+"."):
if max < len(self[k]):
max = len(self[k])
root = k
return root, x[max:-1]
def _oidname(self, x):
root,remainder = self._findroot(x)
return root+remainder
def _oid(self, x):
xl = x.strip(".").split(".")
p = len(xl)-1
while p >= 0 and _mib_re_integer.match(xl[p]):
p -= 1
if p != 0 or xl[p] not in self:
return x
xl[p] = self[xl[p]]
return ".".join(xl[p:])
def _make_graph(self, other_keys=[], **kargs):
nodes = [(k,self[k]) for k in list(self.keys())]
oids = [self[k] for k in list(self.keys())]
for k in other_keys:
if k not in oids:
                nodes.append((self._oidname(k), k))
s = 'digraph "mib" {\n\trankdir=LR;\n\n'
for k,o in nodes:
s += '\t"%s" [ label="%s" ];\n' % (o,k)
s += "\n"
for k,o in nodes:
parent,remainder = self._findroot(o[:-1])
remainder = remainder[1:]+o[-1]
if parent != ".":
parent = self[parent]
s += '\t"%s" -> "%s" [label="%s"];\n' % (parent, o,remainder)
s += "}\n"
do_graph(s, **kargs)
def __len__(self):
return len(list(self.keys()))
def mib_register(ident, value, the_mib, unresolved):
if ident in the_mib or ident in unresolved:
return ident in the_mib
resval = []
not_resolved = 0
for v in value:
if _mib_re_integer.match(v):
resval.append(v)
else:
v = fixname(v)
if v not in the_mib:
not_resolved = 1
if v in the_mib:
v = the_mib[v]
elif v in unresolved:
v = unresolved[v]
if type(v) is list:
resval += v
else:
resval.append(v)
if not_resolved:
unresolved[ident] = resval
return False
else:
the_mib[ident] = resval
keys = list(unresolved.keys())
i = 0
while i < len(keys):
k = keys[i]
if mib_register(k,unresolved[k], the_mib, {}):
del(unresolved[k])
del(keys[i])
i = 0
else:
i += 1
return True
def load_mib(filenames):
the_mib = {'iso': ['1']}
unresolved = {}
for k in list(conf.mib.keys()):
mib_register(k, conf.mib[k].split("."), the_mib, unresolved)
if type(filenames) is str:
filenames = [filenames]
for fnames in filenames:
for fname in glob(fnames):
f = open(fname)
text = f.read()
cleantext = " ".join(_mib_re_strings.split(" ".join(_mib_re_comments.split(text))))
for m in _mib_re_oiddecl.finditer(cleantext):
gr = m.groups()
ident,oid = gr[0],gr[-1]
ident=fixname(ident)
oid = oid.split()
for i in range(len(oid)):
m = _mib_re_both.match(oid[i])
if m:
oid[i] = m.groups()[1]
mib_register(ident, oid, the_mib, unresolved)
newmib = MIBDict(_name="MIB")
for k,o in the_mib.items():
newmib[k]=".".join(o)
for k,o in unresolved.items():
newmib[k]=".".join(o)
conf.mib=newmib
conf.mib = MIBDict(_name="MIB")
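# Hedged usage sketch (not part of the original module): after conf.mib has
# been populated from MIB files, numeric OIDs can be translated back into
# names; the glob pattern and OID below are illustrative only.
def _demo_mib_lookup(pattern="/usr/share/snmp/mibs/*"):
    load_mib(pattern)
    return conf.mib._oidname("1.3.6.1.2.1.1.1.0")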
|
gpl-2.0
|
glenn-edgar/local_controller_3
|
__backup__/flask_web/werkzeug/local.py
|
84
|
13416
|
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wsgi import ClosingIterator
from werkzeug._internal import _patch_wrapper
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
from thread import get_ident
except ImportError: # pragma: no cover
from dummy_thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
    as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return _patch_wrapper(func, self.make_middleware(func))
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
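# Hedged sketch (not part of the original module): tying a Local and a
# LocalManager into a WSGI stack so the per-request data stored on the local
# is released after each request; `app` and `request_path` are illustrative.
def _demo_managed_app(app):
    local = Local()
    manager = LocalManager([local])
    def with_context(environ, start_response):
        local.request_path = environ.get('PATH_INFO', '/')
        return app(environ, start_response)
    return manager.make_middleware(with_context)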
|
mit
|
timzhangau/ml_nano
|
ud120/tools/parse_out_email_text.py
|
15
|
1277
|
#!/usr/bin/python
from nltk.stem.snowball import SnowballStemmer
import string
def parseOutText(f):
""" given an opened email file f, parse out all text below the
metadata block at the top
(in Part 2, you will also add stemming capabilities)
and return a string that contains all the words
in the email (space-separated)
example use case:
f = open("email_file_name.txt", "r")
text = parseOutText(f)
"""
f.seek(0) ### go back to beginning of file (annoying)
all_text = f.read()
### split off metadata
content = all_text.split("X-FileName:")
words = ""
if len(content) > 1:
### remove punctuation
text_string = content[1].translate(string.maketrans("", ""), string.punctuation)
### project part 2: comment out the line below
words = text_string
### split the text string into individual words, stem each word,
### and append the stemmed word to words (make sure there's a single
### space between each stemmed word)
return words
def main():
ff = open("../text_learning/test_email.txt", "r")
text = parseOutText(ff)
print text
if __name__ == '__main__':
main()
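# Hedged sketch (not part of the original exercise file): one way the stemming
# step hinted at above could look, using the SnowballStemmer that is already
# imported. It is illustrative only, not the course solution.
def stem_words(text_string):
    stemmer = SnowballStemmer("english")
    return " ".join(stemmer.stem(word) for word in text_string.split())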
|
mit
|
AsherBond/MondocosmOS
|
grass_trunk/scripts/m.proj/m.proj.py
|
2
|
8042
|
#!/usr/bin/env python
############################################################################
#
# MODULE: m.proj
# AUTHOR: M. Hamish Bowman, Dept. Marine Science, Otago University,
# New Zealand
# Converted to Python by Glynn Clements
# PURPOSE: cs2cs reprojection frontend for a list of coordinates.
# Replacement for m.proj2 from GRASS 5
# COPYRIGHT: (c) 2006-2009 Hamish Bowman, and the GRASS Development Team
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
# notes:
# - cs2cs expects "x y" data so be sure to send it "lon lat" not "lat lon"
# - if you send cs2cs a third data column, beware it might be treated as "z"
# todo:
# - `cut` away x,y columns into a temp file, feed to cs2cs, then `paste`
# back to input file. see method in v.in.garmin.sh. that way additional
# numeric and string columns would survive the trip, and 3rd column would
# not be modified as z.
#%module
#% description: Converts coordinates from one projection to another (cs2cs frontend).
#% keywords: miscellaneous
#% keywords: projection
#%end
#%option G_OPT_F_INPUT
#% description: Name of input coordinate file ('-' to read from stdin)
#% answer: -
#% guisection: Files & format
#%end
#%option G_OPT_F_OUTPUT
#% description: Name for output coordinate file (omit to send to stdout)
#% required : no
#% guisection: Files & format
#%end
#%option G_OPT_F_SEP
#% label: Field separator (format: input[,output])
#% description: Valid field separators are also "space", "tab", or "comma"
#% required : no
#% guisection: Files & format
#%end
#%option
#% key: proj_input
#% type: string
#% description: Input projection parameters (PROJ.4 style)
#% required : no
#% guisection: Projections
#%end
#%option
#% key: proj_output
#% type: string
#% description: Output projection parameters (PROJ.4 style)
#% required : no
#% guisection: Projections
#%end
#%flag
#% key: i
#% description: Use LL WGS84 as input and current location as output projection
#% guisection: Projections
#%end
#%flag
#% key: o
#% description: Use current location as input and LL WGS84 as output projection
#% guisection: Projections
#%end
#%flag
#% key: d
#% description: Output long/lat in decimal degrees, or other projections with many decimal places
#% guisection: Files & format
#%end
#%flag
#% key: e
#% description: Include input coordinates in output file
#% guisection: Files & format
#%end
#%flag
#% key: c
#% description: Include column names in output file
#% guisection: Files & format
#%end
import sys
import os
import threading
from grass.script import core as grass
class TrThread(threading.Thread):
def __init__(self, ifs, inf, outf):
threading.Thread.__init__(self)
self.ifs = ifs
self.inf = inf
self.outf = outf
def run(self):
while True:
line = self.inf.readline()
if not line:
break
line = line.replace(self.ifs, ' ')
self.outf.write(line)
self.outf.flush()
self.outf.close()
def main():
input = options['input']
output = options['output']
fs = options['fs']
proj_in = options['proj_input']
proj_out = options['proj_output']
ll_in = flags['i']
ll_out = flags['o']
decimal = flags['d']
copy_input = flags['e']
include_header = flags['c']
#### check for cs2cs
if not grass.find_program('cs2cs'):
grass.fatal(_("cs2cs program not found, install PROJ.4 first: http://proj.maptools.org"))
#### check for overenthusiasm
if proj_in and ll_in:
grass.fatal(_("Choose only one input parameter method"))
if proj_out and ll_out:
grass.fatal(_("Choose only one output parameter method"))
if ll_in and ll_out:
grass.fatal(_("Choise only one auto-projection parameter method"))
if output and not grass.overwrite() and os.path.exists(output):
grass.fatal(_("Output file already exists"))
#### parse field separator
# FIXME: input_x,y needs to split on multiple whitespace between them
if fs == ',':
ifs = ofs = ','
else:
try:
ifs, ofs = fs.split(',')
except ValueError:
ifs = ofs = fs
ifs = ifs.lower()
ofs = ofs.lower()
if ifs in ('space', 'tab'):
ifs = ' '
elif ifs == 'comma':
ifs = ','
else:
if len(ifs) > 1:
grass.warning(_("Invalid field separator, using '%s'") % ifs[0])
try:
ifs = ifs[0]
except IndexError:
grass.fatal(_("Invalid field separator '%s'") % ifs)
if ofs.lower() == 'space':
ofs = ' '
elif ofs.lower() == 'tab':
ofs = '\t'
elif ofs.lower() == 'comma':
ofs = ','
else:
if len(ofs) > 1:
grass.warning(_("Invalid field separator, using '%s'") % ofs[0])
try:
ofs = ofs[0]
except IndexError:
grass.fatal(_("Invalid field separator '%s'") % ifs)
#### set up projection params
s = grass.read_command("g.proj", flags='j')
kv = grass.parse_key_val(s)
if "XY location" in kv['+proj'] and (ll_in or ll_out):
grass.fatal(_("Unable to project to or from a XY location"))
in_proj = None
if ll_in:
in_proj = "+proj=longlat +datum=WGS84"
grass.verbose("Assuming LL WGS84 as input, current projection as output ")
if ll_out:
in_proj = grass.read_command('g.proj', flags = 'jf')
if proj_in:
in_proj = proj_in
if not in_proj:
grass.verbose("Assuming current location as input")
in_proj = grass.read_command('g.proj', flags = 'jf')
in_proj = in_proj.strip()
grass.verbose("Input parameters: '%s'" % in_proj)
out_proj = None
if ll_out:
out_proj = "+proj=longlat +datum=WGS84"
grass.verbose("Assuming current projection as input, LL WGS84 as output ")
if ll_in:
out_proj = grass.read_command('g.proj', flags = 'jf')
if proj_out:
out_proj = proj_out
if not out_proj:
grass.fatal(_("Missing output projection parameters "))
out_proj = out_proj.strip()
grass.verbose("Output parameters: '%s'" % out_proj)
#### set up input file
if input == '-':
infile = None
inf = sys.stdin
else:
infile = input
if not os.path.exists(infile):
grass.fatal(_("Unable to read input data"))
inf = file(infile)
grass.debug("input file=[%s]" % infile)
#### set up output file
if not output:
outfile = None
outf = sys.stdout
else:
outfile = output
outf = open(outfile, 'w')
grass.debug("output file=[%s]" % outfile)
#### set up output style
if not decimal:
outfmt = ["-w5"]
else:
outfmt = ["-f", "%.8f"]
if not copy_input:
copyinp = []
else:
copyinp = ["-E"]
#### do the conversion
# Convert cs2cs DMS format to GRASS DMS format:
# cs2cs | sed -e 's/d/:/g' -e "s/'/:/g" -e 's/"//g'
cmd = ['cs2cs'] + copyinp + outfmt + in_proj.split() + ['+to'] + out_proj.split()
p = grass.Popen(cmd, stdin = grass.PIPE, stdout = grass.PIPE)
tr = TrThread(ifs, inf, p.stdin)
tr.start()
if not copy_input:
if include_header:
outf.write("x%sy%sz\n" % (ofs, ofs))
for line in p.stdout:
xy, z = line.split(' ', 1)
x, y = xy.split('\t')
outf.write('%s%s%s%s%s\n' % \
(x.strip(), ofs, y.strip(), ofs, z.strip()))
else:
if include_header:
outf.write("input_x%sinput_y%sx%sy%sz\n" % (ofs, ofs, ofs, ofs))
for line in p.stdout:
inXYZ, x, rest = line.split('\t')
inX, inY = inXYZ.split(' ')[:2]
y, z = rest.split(' ', 1)
outf.write('%s%s%s%s%s%s%s%s%s\n' % \
(inX.strip(), ofs, inY.strip(), ofs, x.strip(), \
ofs, y.strip(), ofs, z.strip()))
p.wait()
if p.returncode != 0:
grass.warning(_("Projection transform probably failed, please investigate"))
if __name__ == "__main__":
options, flags = grass.parser()
main()
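# Hedged usage note (not part of the original script): a typical invocation
# inside a GRASS session, reprojecting lon/lat WGS84 coordinates from stdin
# into the current location's projection:
#   echo "170.5 -45.9" | m.proj -i input=-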
|
agpl-3.0
|
nathanielvarona/airflow
|
airflow/providers/google/cloud/hooks/gcs.py
|
1
|
50042
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a Google Cloud Storage hook."""
import functools
import gzip as gz
import os
import shutil
import time
import warnings
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from io import BytesIO
from os import path
from tempfile import NamedTemporaryFile
from typing import Callable, List, Optional, Sequence, Set, Tuple, TypeVar, Union, cast
from urllib.parse import urlparse
from google.api_core.exceptions import NotFound
from google.cloud import storage
from google.cloud.exceptions import GoogleCloudError
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils import timezone
from airflow.version import version
RT = TypeVar('RT') # pylint: disable=invalid-name
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
# Use default timeout from google-cloud-storage
DEFAULT_TIMEOUT = 60
def _fallback_object_url_to_object_name_and_bucket_name(
object_url_keyword_arg_name='object_url',
bucket_name_keyword_arg_name='bucket_name',
object_name_keyword_arg_name='object_name',
) -> Callable[[T], T]:
"""
    Decorator factory that converts an object URL parameter to object name and bucket name parameters.
:param object_url_keyword_arg_name: Name of the object URL parameter
:type object_url_keyword_arg_name: str
:param bucket_name_keyword_arg_name: Name of the bucket name parameter
:type bucket_name_keyword_arg_name: str
:param object_name_keyword_arg_name: Name of the object name parameter
:type object_name_keyword_arg_name: str
:return: Decorator
"""
def _wrapper(func: T):
@functools.wraps(func)
def _inner_wrapper(self: "GCSHook", *args, **kwargs) -> RT:
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
object_url = kwargs.get(object_url_keyword_arg_name)
bucket_name = kwargs.get(bucket_name_keyword_arg_name)
object_name = kwargs.get(object_name_keyword_arg_name)
if object_url and bucket_name and object_name:
raise AirflowException(
"The mutually exclusive parameters. `object_url`, `bucket_name` together "
"with `object_name` parameters are present. "
"Please provide `object_url` or `bucket_name` and `object_name`."
)
if object_url:
bucket_name, object_name = _parse_gcs_url(object_url)
kwargs[bucket_name_keyword_arg_name] = bucket_name
kwargs[object_name_keyword_arg_name] = object_name
del kwargs[object_url_keyword_arg_name]
if not object_name or not bucket_name:
raise TypeError(
f"{func.__name__}() missing 2 required positional arguments: "
f"'{bucket_name_keyword_arg_name}' and '{object_name_keyword_arg_name}' "
f"or {object_url_keyword_arg_name}"
)
if not object_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{object_name_keyword_arg_name}'"
)
if not bucket_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{bucket_name_keyword_arg_name}'"
)
return func(self, *args, **kwargs)
return cast(T, _inner_wrapper)
return _wrapper
class GCSHook(GoogleBaseHook):
"""
Interact with Google Cloud Storage. This hook uses the Google Cloud
connection.
"""
_conn = None # type: Optional[storage.Client]
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
google_cloud_storage_conn_id: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = google_cloud_storage_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> storage.Client:
"""Returns a Google Cloud Storage service object."""
if not self._conn:
self._conn = storage.Client(
credentials=self._get_credentials(), client_info=self.client_info, project=self.project_id
)
return self._conn
def copy(
self,
source_bucket: str,
source_object: str,
destination_bucket: Optional[str] = None,
destination_object: Optional[str] = None,
) -> None:
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket the object will be copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' % (source_bucket, source_object)
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob( # type: ignore[attr-defined]
blob=source_object, destination_bucket=destination_bucket, new_name=destination_object
)
self.log.info(
'Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object.name, # type: ignore[union-attr]
destination_bucket.name, # type: ignore[union-attr]
)
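# Illustrative sketch (not part of the hook): copying an object with the copy()
# method defined above. The connection id, bucket and object names below are
# hypothetical placeholders.
#
#   hook = GCSHook(gcp_conn_id="google_cloud_default")
#   hook.copy(
#       source_bucket="my-source-bucket",
#       source_object="data/report.csv",
#       destination_bucket="my-archive-bucket",   # may be omitted to stay in the same bucket
#       destination_object="archive/report.csv",  # may be omitted to keep the same name
#   )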
def rewrite(
self,
source_bucket: str,
source_object: str,
destination_bucket: str,
destination_object: Optional[str] = None,
) -> None:
"""
Has the same functionality as copy, except that it will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket the object will be copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' % (source_bucket, source_object)
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object, token=token)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
self.log.info(
'Object %s in bucket %s rewritten to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object,
destination_bucket.name, # type: ignore[attr-defined]
)
def download(
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> Union[str, bytes]:
"""
Downloads a file from Google Cloud Storage.
When no filename is supplied, the operator loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
:param chunk_size: Blob chunk size.
:type chunk_size: int
:param timeout: Request timeout in seconds.
:type timeout: int
:param num_max_attempts: Number of attempts to download the file.
:type num_max_attempts: int
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename:
blob.download_to_filename(filename, timeout=timeout)
self.log.info('File downloaded to %s', filename)
return filename
else:
return blob.download_as_string()
except GoogleCloudError:
if num_file_attempts == num_max_attempts:
self.log.error(
'Download attempt of object: %s from %s has failed. Attempt: %s, max %s.',
object_name,
bucket_name,
num_file_attempts,
num_max_attempts,
)
raise
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
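# Illustrative sketch (not part of the hook): the two download modes described in
# the docstring above. Names are hypothetical; with `filename` the blob is written
# to disk and the path is returned, without it the blob content is returned as bytes.
#
#   hook = GCSHook()
#   local_path = hook.download(
#       bucket_name="my-bucket",
#       object_name="data/report.csv",
#       filename="/tmp/report.csv",
#       num_max_attempts=3,
#   )
#   raw_bytes = hook.download(bucket_name="my-bucket", object_name="data/report.csv")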
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
self,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
object_url: Optional[str] = None, # pylint: disable=unused-argument
):
"""
Downloads the file to a temporary directory and returns a file handle.
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param object_url: File reference URL. Must start with "gs://"
:type object_url: str
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
self.download(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
tmp_file.flush()
yield tmp_file
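# Illustrative sketch (not part of the hook): provide_file can be called either with
# bucket_name/object_name or with a single object_url, thanks to the
# _fallback_object_url_to_object_name_and_bucket_name decorator applied above.
# The bucket/object names and the process() helper are hypothetical.
#
#   hook = GCSHook()
#   with hook.provide_file(bucket_name="my-bucket", object_name="data/report.csv") as f:
#       process(f.name)   # hypothetical helper working on the local temp file
#   with hook.provide_file(object_url="gs://my-bucket/data/report.csv") as f:
#       process(f.name)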
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
self,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
object_url: Optional[str] = None, # pylint: disable=unused-argument
):
"""
Creates a temporary file, returns a file handle, and uploads the file's content
on close.
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param object_url: File reference URL. Must start with "gs://"
:type object_url: str
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
yield tmp_file
tmp_file.flush()
self.upload(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
def upload( # pylint: disable=too-many-arguments
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
data: Optional[Union[str, bytes]] = None,
mime_type: Optional[str] = None,
gzip: bool = False,
encoding: str = 'utf-8',
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: int = 1,
) -> None:
"""
Uploads a local file or file data as string or bytes to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param data: The file's data as a string or bytes to be uploaded.
:type data: str
:param mime_type: The file's mime type set when uploading the file.
:type mime_type: str
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
:param encoding: bytes encoding for file data if provided as string
:type encoding: str
:param chunk_size: Blob chunk size.
:type chunk_size: int
:param timeout: Request timeout in seconds.
:type timeout: int
:param num_max_attempts: Number of attempts to try to upload the file.
:type num_max_attempts: int
"""
def _call_with_retry(f: Callable[[], None]) -> None:
"""Helper functions to upload a file or a string with a retry mechanism and exponential back-off.
:param f: Callable that should be retried.
:type f: Callable[[], None]
"""
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
f()
except GoogleCloudError as e:
if num_file_attempts == num_max_attempts:
self.log.error(
'Upload attempt of object: %s to %s has failed. Attempt: %s, max %s.',
object_name,
bucket_name,
num_file_attempts,
num_max_attempts,
)
raise e
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename and data:
raise ValueError(
"'filename' and 'data' parameter provided. Please "
"specify a single parameter, either 'filename' for "
"local file uploads or 'data' for file content uploads."
)
elif filename:
if not mime_type:
mime_type = 'application/octet-stream'
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
_call_with_retry(
partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
elif data:
if not mime_type:
mime_type = 'text/plain'
if gzip:
if isinstance(data, str):
data = bytes(data, encoding)
out = BytesIO()
with gz.GzipFile(fileobj=out, mode="w") as f:
f.write(data)
data = out.getvalue()
_call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))
self.log.info('Data stream uploaded to %s in %s bucket', object_name, bucket_name)
else:
raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
def exists(self, bucket_name: str, object_name: str) -> bool:
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
def get_blob_update_time(self, bucket_name: str, object_name: str):
"""
Get the update time of a file in Google Cloud Storage
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob to get updated time from the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
if blob is None:
raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
return blob.updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
Checks if a blob was updated in Google Cloud Storage after the given timestamp.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
def is_updated_between(
self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
"""
Checks if a blob was updated in Google Cloud Storage within the given time interval.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param min_ts: The minimum timestamp to check against.
:type min_ts: datetime.datetime
:param max_ts: The maximum timestamp to check against.
:type max_ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not min_ts.tzinfo:
min_ts = min_ts.replace(tzinfo=timezone.utc)
if not max_ts.tzinfo:
max_ts = max_ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
if min_ts <= blob_update_time < max_ts:
return True
return False
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
Checks if a blob was updated before the given time in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s < %s", blob_update_time, ts)
if blob_update_time < ts:
return True
return False
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
"""
Check if the object is older than the given number of seconds.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param seconds: The time in seconds to check against
:type seconds: int
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
from datetime import timedelta
current_time = timezone.utcnow()
given_time = current_time - timedelta(seconds=seconds)
self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
if blob_update_time < given_time:
return True
return False
def delete(self, bucket_name: str, object_name: str) -> None:
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
def delete_bucket(self, bucket_name: str, force: bool = False) -> None:
"""
Delete a bucket object from the Google Cloud Storage.
:param bucket_name: name of the bucket which will be deleted
:type bucket_name: str
:param force: if False (default), deleting a non-empty bucket is not allowed;
set force=True to allow deleting a non-empty bucket
:type force: bool
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
self.log.info("Deleting %s bucket", bucket_name)
try:
bucket.delete(force=force)
self.log.info("Bucket %s has been deleted", bucket_name)
except NotFound:
self.log.info("Bucket %s not exists", bucket_name)
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None) -> list:
"""
List all objects from the bucket with the given string prefix in the name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def list_by_timespan(
self,
bucket_name: str,
timespan_start: datetime,
timespan_end: datetime,
versions: bool = None,
max_results: int = None,
prefix: str = None,
delimiter: str = None,
) -> list:
"""
List all objects from the bucket with the given string prefix in the name that were
updated in the time between ``timespan_start`` and ``timespan_end``.
:param bucket_name: bucket name
:type bucket_name: str
:param timespan_start: will return objects that were updated at or after this datetime (UTC)
:type timespan_start: datetime
:param timespan_end: will return objects that were updated before this datetime (UTC)
:type timespan_end: datetime
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (e.g. '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
if timespan_start <= blob.updated.replace(tzinfo=timezone.utc) < timespan_end:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def get_size(self, bucket_name: str, object_name: str) -> int:
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
def get_crc32c(self, bucket_name: str, object_name: str):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info(
'Retrieving the crc32c checksum of object_name: %s in bucket_name: %s',
object_name,
bucket_name,
)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
def get_md5hash(self, bucket_name: str, object_name: str) -> str:
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(
self,
bucket_name: str,
resource: Optional[dict] = None,
storage_class: str = 'MULTI_REGIONAL',
location: str = 'US',
project_id: Optional[str] = None,
labels: Optional[dict] = None,
) -> str:
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the Google Cloud Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info(
'Creating Bucket: %s; Location: %s; Storage Class: %s', bucket_name, location, storage_class
)
# Add airflow-version label to the bucket
labels = labels or {}
labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property( # pylint: disable=protected-access
name=item, value=resource[item] # type: ignore[index]
)
bucket.storage_class = storage_class
bucket.labels = labels
bucket.create(project=project_id, location=location)
return bucket.id
def insert_bucket_acl(
self, bucket_name: str, entity: str, role: str, user_project: Optional[str] = None
) -> None:
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
def insert_object_acl(
self,
bucket_name: str,
object_name: str,
entity: str,
role: str,
generation: Optional[int] = None,
user_project: Optional[str] = None,
) -> None:
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param generation: Optional. If present, selects a specific revision of this object.
:type generation: long
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name, generation=generation)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s', object_name, bucket_name)
def compose(self, bucket_name: str, source_objects: List, destination_object: str) -> None:
"""
Composes a list of existing objects into a new object in the same storage bucket.
Currently it only supports up to 32 objects that can be concatenated
in a single operation.
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the object if given.
:type destination_object: str
"""
if not source_objects:
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[bucket.blob(blob_name=source_object) for source_object in source_objects]
)
self.log.info("Completed successfully.")
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: Optional[str] = None,
destination_object: Optional[str] = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False,
) -> None:
"""
Synchronizes the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:type source_bucket: str
:param destination_bucket: The name of the bucket containing the destination objects.
:type destination_bucket: str
:param source_object: The root sync directory in the source bucket.
:type source_object: Optional[str]
:param destination_object: The root sync directory in the destination bucket.
:type destination_object: Optional[str]
:param recursive: If True, subdirectories will be considered
:type recursive: bool
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
:type allow_overwrite: bool
:param delete_extra_files: if True, deletes additional files from the destination that are not
found in the source. By default, extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:type delete_extra_files: bool
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = self._normalize_directory_path(source_object)
destination_object = self._normalize_directory_path(destination_object)
# Calculate the number of characters to remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive,
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
# Copy missing object to new bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.copy(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self, blob: storage.Blob, destination_object: Optional[str], source_object_prefix_len: int
) -> str:
return (
path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
def _normalize_directory_path(self, source_object: Optional[str]) -> Optional[str]:
return source_object + "/" if source_object and not source_object.endswith("/") else source_object
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: Optional[str],
destination_object: Optional[str],
recursive: bool,
) -> Tuple[Set[storage.Blob], Set[storage.Blob], Set[storage.Blob]]:
# Calculate the number of characters to remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter)
)
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs = {source_names_index[a] for a in to_copy} # type: Set[storage.Blob]
to_delete_blobs = {destination_names_index[a] for a in to_delete} # type: Set[storage.Blob]
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs = set() # type: Set[storage.Blob]
# Compare objects based on crc32
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
# if the objects are different, save it
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
def gcs_object_is_directory(bucket: str) -> bool:
"""
Return True if given Google Cloud Storage URL (gs://<bucket>/<blob>)
is a directory or an empty bucket. Otherwise return False.
"""
_, blob = _parse_gcs_url(bucket)
return len(blob) == 0 or blob.endswith('/')
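# Illustrative examples of gcs_object_is_directory (hypothetical URLs), following
# the rule above: an empty blob name or a trailing slash counts as a directory.
#
#   gcs_object_is_directory("gs://my-bucket")            # True  (empty blob)
#   gcs_object_is_directory("gs://my-bucket/data/")      # True  (trailing slash)
#   gcs_object_is_directory("gs://my-bucket/data/a.csv") # False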
def _parse_gcs_url(gsurl: str) -> Tuple[str, str]:
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
if parsed_url.scheme.lower() != "gs":
raise AirflowException(f"Schema must be to 'gs://': Current schema: '{parsed_url.scheme}://'")
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip('/')
return bucket, blob
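# Illustrative example of _parse_gcs_url with a hypothetical URL:
#
#   _parse_gcs_url("gs://my-bucket/path/to/file.csv")
#   # -> ("my-bucket", "path/to/file.csv")
#
# A URL without a bucket (e.g. "gs://") or with a non-"gs" scheme raises AirflowException.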
|
apache-2.0
|
debugger22/sympy
|
sympy/matrices/expressions/tests/test_transpose.py
|
71
|
1194
|
from sympy.functions import adjoint, conjugate, transpose
from sympy.matrices.expressions import MatrixSymbol, Adjoint, trace, Transpose
from sympy.matrices import eye, Matrix
from sympy import symbols, S
from sympy import refine, Q
n, m, l, k, p = symbols('n m l k p', integer=True)
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', m, l)
C = MatrixSymbol('C', n, n)
def test_transpose():
Sq = MatrixSymbol('Sq', n, n)
assert transpose(A) == Transpose(A)
assert Transpose(A).shape == (m, n)
assert Transpose(A*B).shape == (l, n)
assert transpose(Transpose(A)) == A
assert isinstance(Transpose(Transpose(A)), Transpose)
assert adjoint(Transpose(A)) == Adjoint(Transpose(A))
assert conjugate(Transpose(A)) == Adjoint(A)
assert Transpose(eye(3)).doit() == eye(3)
assert Transpose(S(5)).doit() == S(5)
assert Transpose(Matrix([[1, 2], [3, 4]])).doit() == Matrix([[1, 3], [2, 4]])
assert transpose(trace(Sq)) == trace(Sq)
assert trace(Transpose(Sq)) == trace(Sq)
assert Transpose(Sq)[0, 1] == Sq[1, 0]
assert Transpose(A*B).doit() == Transpose(B) * Transpose(A)
def test_refine():
assert refine(C.T, Q.symmetric(C)) == C
|
bsd-3-clause
|
lupyuen/RaspberryPiImage
|
home/pi/GrovePi/Software/Python/others/temboo/Library/LittleSis/Relationship/GetBatchRelationships.py
|
5
|
4101
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetBatchRelationships
# Retrieves information about a batch of relationships in LittleSis according to the relationship IDs provided.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBatchRelationships(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetBatchRelationships Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetBatchRelationships, self).__init__(temboo_session, '/Library/LittleSis/Relationship/GetBatchRelationships')
def new_input_set(self):
return GetBatchRelationshipsInputSet()
def _make_result_set(self, result, path):
return GetBatchRelationshipsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetBatchRelationshipsChoreographyExecution(session, exec_id, path)
class GetBatchRelationshipsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetBatchRelationships
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from LittleSis.org.)
"""
super(GetBatchRelationshipsInputSet, self)._set_input('APIKey', value)
def set_Details(self, value):
"""
Set the value of the Details input for this Choreo. ((optional, integer) Indicate 1 to include details for each relationship record returned. Otherwise, only a basic record will be returned.)
"""
super(GetBatchRelationshipsInputSet, self)._set_input('Details', value)
def set_RelationshipIDs(self, value):
"""
Set the value of the RelationshipIDs input for this Choreo. ((required, string) The IDs of the relationship records to be returned as a comma delimited string.)
"""
super(GetBatchRelationshipsInputSet, self)._set_input('RelationshipIDs', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Format of the response returned by LittleSis.org. Acceptable inputs: xml or json. Defaults to xml)
"""
super(GetBatchRelationshipsInputSet, self)._set_input('ResponseFormat', value)
class GetBatchRelationshipsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetBatchRelationships Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from LittleSis.org.)
"""
return self._output.get('Response', None)
class GetBatchRelationshipsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetBatchRelationshipsResultSet(response, path)
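# Illustrative usage sketch (not part of the generated bindings). It assumes the
# Temboo SDK's usual TembooSession class and execute_with_results() call; the
# account/app-key values, API key and relationship IDs below are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = GetBatchRelationships(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey("LITTLESIS_API_KEY")
#   inputs.set_RelationshipIDs("34,56,78")
#   inputs.set_ResponseFormat("json")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())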
|
apache-2.0
|
hugegreenbug/libgestures
|
include/build/android/gyp/jar_toc.py
|
25
|
3110
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a TOC file from a Java jar.
The TOC file contains the non-package API of the jar. This includes all
public/protected classes/functions/members and the values of static final
variables. Some other information (major/minor javac version) is also included.
This TOC file then can be used to determine if a dependent library should be
rebuilt when this jar changes. I.e. any change to the jar that would require a
rebuild will have a corresponding change in the TOC file.
"""
import optparse
import os
import re
import sys
import zipfile
from util import build_utils
from util import md5_check
def GetClassesInZipFile(zip_file):
classes = []
files = zip_file.namelist()
for f in files:
if f.endswith('.class'):
# f is of the form org/chromium/base/Class$Inner.class
classes.append(f.replace('/', '.')[:-6])
return classes
def CallJavap(classpath, classes):
javap_cmd = [
'javap',
'-protected', # In reality both public & protected.
# -verbose is required to get constant values (which can be inlined in
# dependents).
'-verbose',
'-classpath', classpath
] + classes
return build_utils.CheckOutput(javap_cmd)
def ExtractToc(disassembled_classes):
# javap output is structured by indent (2-space) levels.
good_patterns = [
'^[^ ]', # This includes all class/function/member signatures.
'^ SourceFile:',
'^ minor version:',
'^ major version:',
'^ Constant value:',
]
bad_patterns = [
'^const #', # Matches the constant pool (i.e. literals used in the class).
]
def JavapFilter(line):
return (re.match('|'.join(good_patterns), line) and
not re.match('|'.join(bad_patterns), line))
toc = filter(JavapFilter, disassembled_classes.split('\n'))
return '\n'.join(toc)
def UpdateToc(jar_path, toc_path):
classes = GetClassesInZipFile(zipfile.ZipFile(jar_path))
javap_output = CallJavap(classpath=jar_path, classes=classes)
toc = ExtractToc(javap_output)
with open(toc_path, 'w') as tocfile:
tocfile.write(toc)
def DoJarToc(options):
jar_path = options.jar_path
toc_path = options.toc_path
record_path = '%s.md5.stamp' % toc_path
md5_check.CallAndRecordIfStale(
lambda: UpdateToc(jar_path, toc_path),
record_path=record_path,
input_paths=[jar_path],
)
build_utils.Touch(toc_path)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--jar-path', help='Input .jar path.')
parser.add_option('--toc-path', help='Output .jar.TOC path.')
parser.add_option('--stamp', help='Path to touch on success.')
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
parser.add_option('--ignore', help='Ignored.')
options, _ = parser.parse_args()
DoJarToc(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
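# Illustrative invocation (paths are hypothetical). The script reads the jar,
# disassembles its classes with javap, and writes the filtered TOC file:
#
#   python jar_toc.py --jar-path out/libs/base.jar --toc-path out/libs/base.jar.TOC \
#       --stamp out/stamps/base_jar_toc.stamp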
|
bsd-3-clause
|
squarenomad/historia
|
backend/manage.py
|
1
|
1531
|
#!/usr/bin/env python3
from app import create_app, db, graph, forge
from app.email import send_email
from app.models import Person, Link
from app.faker import fake
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
import os
app = create_app(os.getenv('HISTORIA_CONFIG') or 'default')
# Instantiate extensions that modify app-runtimes here
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def create_db():
db.create_all()
# Attach functions to app-runtime flags
def make_shell_context():
return dict(
db=db,
Person=Person,
Link=Link,
graph=graph,
app=app,
send_email=send_email,
forge=forge,
fake=fake
)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('forge', forge)
if __name__ == '__main__':
with app.app_context():
basedir = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.join(basedir, 'data')
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if basedir in app.config['SQLALCHEMY_DATABASE_URI']:
if not os.path.exists(app.config['SQLALCHEMY_DATABASE_URI']):
print('No development database is present!')
manager.run()
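# Illustrative command-line usage via Flask-Script (command names follow the
# functions and commands registered above; the db commands come from
# Flask-Migrate's MigrateCommand):
#
#   python3 manage.py test        # run the unit tests
#   python3 manage.py create_db   # create all tables
#   python3 manage.py shell       # shell with db, Person, Link, etc. preloaded
#   python3 manage.py db upgrade  # apply migrations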
|
mit
|
kdwink/intellij-community
|
python/lib/Lib/mailbox.py
|
80
|
74677
|
#! /usr/bin/env python
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
import sys
import os
import time
import calendar
import socket
import errno
import copy
import email
import email.Message
import email.Generator
import rfc822
import StringIO
try:
if sys.platform == 'os2emx':
# OS/2 EMX fcntl() not adequate
raise ImportError
import fcntl
except ImportError:
fcntl = None
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage', 'UnixMailbox',
'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __contains__(self, key):
return self.has_key(key)
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.iterkeys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
# used in strings and by email.Message are translated here.
"""Dump message contents to target file."""
if isinstance(message, email.Message.Message):
buffer = StringIO.StringIO()
gen = email.Generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
target.write(buffer.read().replace('\n', os.linesep))
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
elif hasattr(message, 'read'):
while True:
line = message.readline()
if line == '':
break
if mangle_from_ and line.startswith('From '):
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
else:
raise TypeError('Invalid message type: %s' % type(message))
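# Illustrative sketch of the generic Mailbox API defined above, using the Maildir
# subclass that follows (the path is hypothetical; Python 2 syntax as in this module):
#
#   box = Maildir('/home/user/Maildir', factory=None)
#   for key, msg in box.iteritems():
#       print key, msg['Subject']
#   box.lock()
#   try:
#       key = box.add('From: a@example.com\n\nhello\n')
#       box.flush()
#   finally:
#       box.unlock()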
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.mkdir(os.path.join(self._path, 'tmp'), 0700)
os.mkdir(os.path.join(self._path, 'new'), 0700)
os.mkdir(os.path.join(self._path, 'cur'), 0700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
finally:
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError, e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
if isinstance(message, MaildirMessage):
os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except KeyError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
raise
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
new_path = os.path.join(self._path, subdir, key + suffix)
os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
os.utime(new_path, (os.path.getatime(new_path),
message.get_date()))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
f = open(os.path.join(self._path, subpath), 'r')
try:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
finally:
f.close()
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'r')
try:
return f.read()
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
return # Maildir changes are always written immediately.
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
Maildir._count += 1
try:
return _create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
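    # Editor's illustrative note (not part of the module): the unique tmp name
    # built above has the form "<seconds>.M<microseconds>P<pid>Q<count>.<host>",
    # e.g. (hypothetical) "1200000000.M123456P4321Q1.example.host".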
def _refresh(self):
"""Update table of contents mapping."""
self._toc = {}
for subdir in ('new', 'cur'):
subdir_path = os.path.join(self._path, subdir)
for entry in os.listdir(subdir_path):
p = os.path.join(subdir_path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[self._onetime_keys.next()]
except StopIteration:
return None
except KeyError:
continue
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno == errno.EACCES:
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
self._pending = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
for key in self._toc.keys():
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
return
self._lookup()
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if buffer == '':
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
try:
os.rename(new_file.name, self._path)
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(self._path)
os.rename(new_file.name, self._path)
else:
raise
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
self._file.flush()
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(os.linesep, '')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(os.linesep, '\n'))
msg.set_from(from_line[5:])
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(os.linesep, '\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str) and message.startswith('From '):
newline = message.find('\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = ''
elif isinstance(message, _mboxMMDFMessage):
from_line = 'From ' + message.get_from()
elif isinstance(message, email.Message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is None:
from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
start = self._file.tell()
self._file.write(from_line + os.linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
if f.tell() != 0:
f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(line_pos)
elif line == '':
stops.append(line_pos)
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\001\001\001\001' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\001\001\001\001' + os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith('\001\001\001\001' + os.linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\001\001\001\001' + os.linesep:
stops.append(line_pos - len(os.linesep))
break
elif line == '':
stops.append(line_pos)
break
elif line == '':
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
try:
if self._locked:
_lock_file(f)
try:
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
f.close()
os.remove(os.path.join(self._path, str(key)))
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences():
if key in key_list:
msg.add_sequence(name)
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
return f.read()
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
self._file.close()
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
f = open(os.path.join(self._path, '.mh_sequences'), 'r')
try:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
finally:
f.close()
return results
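    # Editor's illustrative example (assumed keys): a .mh_sequences line such as
    #   unseen: 1-3 7
    # is parsed by get_sequences() into {'unseen': [1, 2, 3, 7]} when those keys
    # exist, and set_sequences() writes back the same compressed range form.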
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.iteritems():
if len(keys) == 0:
continue
f.write('%s:' % name)
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.iteritems():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
visible_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
visible_headers.write(line.replace(os.linesep, '\n'))
body = self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
return original_headers.getvalue() + \
self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\037\014' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(',')
if label.strip() != '']
label_lists.append(labels)
elif line == '\037' or line == '\037' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
elif line == '':
stops.append(line_pos - len(os.linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
(os.linesep, os.linesep, ','.join(self.get_labels()),
os.linesep))
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\014' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write('1')
for label in special_labels:
self._file.write(', ' + label)
self._file.write(',,')
for label in labels:
self._file.write(' ' + label + ',')
self._file.write(os.linesep)
else:
self._file.write('1,,' + os.linesep)
if isinstance(message, email.Message.Message):
orig_buffer = StringIO.StringIO()
orig_generator = email.Generator.Generator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
self._file.write('*** EOOH ***' + os.linesep)
if isinstance(message, BabylMessage):
vis_buffer = StringIO.StringIO()
vis_generator = email.Generator.Generator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
elif isinstance(message, str):
body_start = message.find('\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write('*** EOOH ***' + os.linesep)
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write(message[body_start:].replace('\n',
os.linesep))
else:
self._file.write('*** EOOH ***' + os.linesep + os.linesep)
self._file.write(message.replace('\n', os.linesep))
elif hasattr(message, 'readline'):
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
self._file.write('*** EOOH ***' + os.linesep)
if first_pass:
first_pass = False
message.seek(original_pos)
else:
break
while True:
buffer = message.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
class Message(email.Message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.Message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_file(message))
elif message is None:
email.Message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
for name in ('_headers', '_unixfrom', '_payload', '_charset',
'preamble', 'epilogue', 'defects', '_default_type'):
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
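    # Editor's note (illustrative): Maildir keeps flags in the info string with
    # a '2,' prefix, so an info of '2,FS' means the 'F' (flagged) and 'S' (seen)
    # flags are set and get_flags() returns 'FS'.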
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags() != '':
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.Message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence must be a string: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
def __init__(self, message=None):
"""Initialize an BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
return iter(self.readline, "")
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
self._file.close()
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return ''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except IOError, e:
if e.errno == errno.EACCES:
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
else:
raise
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
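# Editor's illustrative sketch (not part of the module): a typical round trip
# with the new-style classes above, assuming a writable, hypothetical path.
#
#     import mailbox
#     box = mailbox.mbox('/tmp/example.mbox')
#     box.lock()
#     try:
#         key = box.add('From: sender@example.com\n\nhello world\n')
#         print box.get_string(key)
#     finally:
#         box.unlock()
#         box.close()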
## Start: classes from the original module (for backward compatibility).
# Note that the Maildir class, whose name is unchanged, itself offers a next()
# method for backward compatibility.
class _Mailbox:
def __init__(self, fp, factory=rfc822.Message):
self.fp = fp
self.seekp = 0
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start != stop:
break
return self.factory(_PartialFile(self.fp, start, stop))
# Recommended to use PortableUnixMailbox instead!
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
def _search_end(self):
self.fp.readline() # Throw away header line
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness. You can either
# specify a different regular expression or define a whole new
# _isrealfromline() method. Note that this only gets called for lines
# starting with the 5 characters "From ".
#
# BAW: According to
#http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
# the only portable, reliable way to find message delimiters in a BSD (i.e
# Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
# beginning of the file, "^From .*\n". While _fromlinepattern below seems
# like a good idea, in practice, there are too many variations for more
# strict parsing of the line to be completely accurate.
#
# _strict_isrealfromline() is the old version which tries to do stricter
# parsing of the From_ line. _portable_isrealfromline() simply returns
# true, since it's never called if the line doesn't already start with
# "From ".
#
# This algorithm, and the way it interacts with _search_start() and
# _search_end() may not be completely correct, because it doesn't check
# that the two characters preceding "From " are \n\n or the beginning of
# the file. Fixing this would require a more extensive rewrite than is
# necessary. For convenience, we've added a PortableUnixMailbox class
# which does no checking of the format of the 'From' line.
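    # Editor's illustrative example: a line such as
    #   From jdoe@example.com Sat Jan  3 01:05:34 1996
    # satisfies _strict_isrealfromline(), while _portable_isrealfromline()
    # accepts any line beginning with "From ".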
_fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
r"[^\s]*\s*"
"$")
_regexp = None
def _strict_isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
def _portable_isrealfromline(self, line):
return True
_isrealfromline = _strict_isrealfromline
class PortableUnixMailbox(UnixMailbox):
_isrealfromline = UnixMailbox._portable_isrealfromline
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname, factory=rfc822.Message):
import re
pat = re.compile('^[1-9][0-9]*$')
self.dirname = dirname
# the three following lines could be combined into:
# list = map(long, filter(pat.match, os.listdir(self.dirname)))
list = os.listdir(self.dirname)
list = filter(pat.match, list)
list = map(long, list)
list.sort()
# This only works in Python 1.6 or later;
# before that str() added 'L':
self.boxes = map(str, list)
self.boxes.reverse()
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
if not self.boxes:
return None
fn = self.boxes.pop()
fp = open(os.path.join(self.dirname, fn))
msg = self.factory(fp)
try:
msg._mh_msgno = fn
except (AttributeError, TypeError):
pass
return msg
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n' or line == '\037':
self.fp.seek(pos)
return
## End: classes from the original module (for backward compatibility).
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
|
apache-2.0
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-62/files/myenv/lib/python2.7/site-packages/pip/commands/completion.py
|
343
|
2453
|
from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
""", 'fish': """
function __fish_complete_pip
set -lx COMP_WORDS (commandline -o) ""
set -lx COMP_CWORD (math (contains -i -- (commandline -t) $COMP_WORDS)-1)
set -lx PIP_AUTO_COMPLETE 1
string split \ -- (eval $COMP_WORDS[1])
end
complete -fa "(__fish_complete_pip)" -c pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command used for command completion.'
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
cmd_opts.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
cmd_opts.add_option(
'--fish', '-f',
action='store_const',
const='fish',
dest='shell',
help='Emit completion code for fish')
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
)
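# Editor's note (illustrative, not part of this module): the emitted script is
# typically appended to the shell's startup file, for example
#   pip completion --bash >> ~/.bashrc
#   pip completion --zsh >> ~/.zshrc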
|
gpl-3.0
|
dudepare/django
|
django/conf/project_template/project_name/settings.py
|
271
|
3288
|
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
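# Editor's illustrative example (assumption, not part of the generated template):
# a production deployment would typically override the two settings above, e.g.
#   DEBUG = False
#   ALLOWED_HOSTS = ['example.com', 'www.example.com']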
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
|
bsd-3-clause
|
caneruguz/osf.io
|
api_tests/base/test_utils.py
|
12
|
3700
|
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
import mock # noqa
import unittest
from rest_framework import fields
from rest_framework.exceptions import ValidationError
from api.base import utils as api_utils
from tests.base import ApiTestCase
from framework.status import push_status_message
class TestTruthyFalsy:
"""Check that our copy/pasted representation of
TRUTHY and FALSY match the DRF BooleanField's versions
"""
def test_truthy(self):
assert_equal(api_utils.TRUTHY, fields.BooleanField.TRUE_VALUES)
def test_falsy(self):
assert_equal(api_utils.FALSY, fields.BooleanField.FALSE_VALUES)
class TestIsDeprecated(unittest.TestCase):
def setUp(self):
super(TestIsDeprecated, self).setUp()
self.min_version = '2.0'
self.max_version = '2.5'
def test_is_deprecated(self):
request_version = '2.6'
is_deprecated = api_utils.is_deprecated(request_version, self.min_version, self.max_version)
assert_equal(is_deprecated, True)
def test_is_not_deprecated(self):
request_version = '2.5'
is_deprecated = api_utils.is_deprecated(request_version, self.min_version, self.max_version)
assert_equal(is_deprecated, False)
class TestFlaskDjangoIntegration:
def test_push_status_message_no_response(self):
status_message = 'This is a message'
statuses = ['info', 'warning', 'warn', 'success', 'danger', 'default']
for status in statuses:
try:
push_status_message(status_message, kind=status)
except:
assert_true(False, 'Exception from push_status_message via API v2 with type "{}".'.format(status))
def test_push_status_message_expected_error(self):
status_message = 'This is a message'
try:
push_status_message(status_message, kind='error')
assert_true(False, 'push_status_message() should have generated a ValidationError exception.')
except ValidationError as e:
assert_equal(e.detail[0], status_message,
'push_status_message() should have passed along the message with the Exception.')
except RuntimeError:
assert_true(False, 'push_status_message() should have caught the runtime error and replaced it.')
except:
assert_true(False, 'Exception from push_status_message when called from the v2 API with type "error"')
@mock.patch('framework.status.session')
def test_push_status_message_unexpected_error(self, mock_sesh):
status_message = 'This is a message'
exception_message = 'this is some very unexpected problem'
mock_get = mock.Mock(side_effect=RuntimeError(exception_message))
mock_data = mock.Mock()
mock_data.attach_mock(mock_get, 'get')
mock_sesh.attach_mock(mock_data, 'data')
try:
push_status_message(status_message, kind='error')
assert_true(False, 'push_status_message() should have generated a RuntimeError exception.')
except ValidationError as e:
assert_true(False, 'push_status_message() should have re-raised the RuntimeError not gotten ValidationError.')
except RuntimeError as e:
assert_equal(getattr(e, 'message', None),
exception_message,
'push_status_message() should have re-raised the '
'original RuntimeError with the original message.')
except:
assert_true(False, 'Unexpected Exception from push_status_message when called '
'from the v2 API with type "error"')
|
apache-2.0
|
thomasyu888/synapsePythonClient
|
tests/integration/synapseclient/core/upload/test_multipart_upload.py
|
1
|
7776
|
import filecmp
import hashlib
import os
import random
import requests
import string
import tempfile
import traceback
import uuid
from io import open
from unittest import mock, skip
from synapseclient import File
import synapseclient.core.config
import synapseclient.core.utils as utils
from synapseclient.core.upload.multipart_upload import (
MIN_PART_SIZE,
multipart_upload_file,
multipart_upload_string,
multipart_copy,
)
def test_round_trip(syn, project, schedule_for_cleanup):
fhid = None
filepath = utils.make_bogus_binary_file(MIN_PART_SIZE + 777771)
try:
fhid = multipart_upload_file(syn, filepath)
# Download the file and compare it with the original
junk = File(parent=project, dataFileHandleId=fhid)
junk.properties.update(syn._createEntity(junk.properties))
(tmp_f, tmp_path) = tempfile.mkstemp()
schedule_for_cleanup(tmp_path)
junk['path'] = syn._downloadFileHandle(fhid, junk['id'], 'FileEntity', tmp_path)
assert filecmp.cmp(filepath, junk.path)
finally:
try:
if 'junk' in locals():
syn.delete(junk)
except Exception:
print(traceback.format_exc())
try:
os.remove(filepath)
except Exception:
print(traceback.format_exc())
def test_single_thread_upload(syn):
synapseclient.core.config.single_threaded = True
try:
filepath = utils.make_bogus_binary_file(MIN_PART_SIZE * 2 + 1)
assert multipart_upload_file(syn, filepath) is not None
finally:
synapseclient.core.config.single_threaded = False
def test_randomly_failing_parts(syn, project, schedule_for_cleanup):
"""Verify that we can recover gracefully with some randomly inserted errors
while uploading parts."""
fail_every = 3 # fail every nth request
fail_cycle = random.randint(0, fail_every - 1) # randomly vary which n of the request cycle we fail
fhid = None
filepath = utils.make_bogus_binary_file(MIN_PART_SIZE * 2 + (MIN_PART_SIZE / 2))
put_count = 0
normal_put = requests.Session.put
def _put_chunk_or_fail_randomly(self, url, *args, **kwargs):
# fail every nth put to aws s3
if 's3.amazonaws.com' not in url:
return normal_put(self, url, *args, **kwargs)
nonlocal put_count
put_count += 1
if (put_count + fail_cycle) % fail_every == 0:
raise IOError("Ooops! Artificial upload failure for testing.")
return normal_put(self, url, *args, **kwargs)
with mock.patch('requests.Session.put', side_effect=_put_chunk_or_fail_randomly, autospec=True):
try:
fhid = multipart_upload_file(syn, filepath, part_size=MIN_PART_SIZE)
# Download the file and compare it with the original
junk = File(parent=project, dataFileHandleId=fhid)
junk.properties.update(syn._createEntity(junk.properties))
(tmp_f, tmp_path) = tempfile.mkstemp()
schedule_for_cleanup(tmp_path)
junk['path'] = syn._downloadFileHandle(fhid, junk['id'], 'FileEntity', tmp_path)
assert filecmp.cmp(filepath, junk.path)
finally:
try:
if 'junk' in locals():
syn.delete(junk)
except Exception:
print(traceback.format_exc())
try:
os.remove(filepath)
except Exception:
print(traceback.format_exc())
def test_multipart_upload_big_string(syn, project, schedule_for_cleanup):
cities = ["Seattle", "Portland", "Vancouver", "Victoria",
"San Francisco", "Los Angeles", "New York",
"Oaxaca", "Cancún", "Curaçao", "जोधपुर",
"অসম", "ལྷ་ས།", "ཐིམ་ཕུ་", "دبي", "አዲስ አበባ",
"São Paulo", "Buenos Aires", "Cartagena",
"Amsterdam", "Venice", "Rome", "Dubrovnik",
"Sarajevo", "Madrid", "Barcelona", "Paris",
"Αθήνα", "Ρόδος", "København", "Zürich",
"金沢市", "서울", "แม่ฮ่องสอน", "Москва"]
text = "Places I wanna go:\n"
while len(text.encode('utf-8')) < MIN_PART_SIZE:
text += ", ".join(random.choice(cities) for i in range(5000)) + "\n"
fhid = multipart_upload_string(syn, text)
# Download the file and compare it with the original
junk = File(parent=project, dataFileHandleId=fhid)
junk.properties.update(syn._createEntity(junk.properties))
(tmp_f, tmp_path) = tempfile.mkstemp()
schedule_for_cleanup(tmp_path)
junk['path'] = syn._downloadFileHandle(fhid, junk['id'], "FileEntity", tmp_path)
with open(junk.path, encoding='utf-8') as f:
retrieved_text = f.read()
assert retrieved_text == text
def _multipart_copy_test(syn, project, schedule_for_cleanup, part_size):
import logging
logging.basicConfig()
logging.getLogger(synapseclient.client.DEFAULT_LOGGER_NAME).setLevel(logging.DEBUG)
dest_folder_name = "test_multipart_copy_{}".format(uuid.uuid4())
# create a new folder with a storage location we own that we can copy to
dest_folder, storage_location_setting, _ = syn.create_s3_storage_location(
parent=project,
folder_name=dest_folder_name
)
part_size = part_size
file_size = int(part_size * 1.1)
base_string = ''.join(random.choices(string.ascii_lowercase, k=1024))
file_content = base_string
while len(file_content) < file_size:
file_content += base_string
part_md5_hexes = []
part_count = file_size // part_size
if file_size % part_size > 0:
part_count += 1
content_pos = 0
for _ in range(part_count):
next_pos = content_pos + part_size
part_content = file_content[content_pos:next_pos]
content_pos = next_pos
md5 = hashlib.md5(part_content.encode('utf-8'))
part_md5_hexes.append(md5.hexdigest())
tmp = tempfile.NamedTemporaryFile(delete=False)
schedule_for_cleanup(tmp.name)
with open(tmp.name, 'w') as tmp_out:
tmp_out.write(file_content)
file = File(tmp.name, parent=project)
entity = syn.store(file)
fhid = entity['dataFileHandleId']
# source_file_handle = syn._get_file_handle_as_creator(fhid)
dest_file_name = "{}_copy".format(entity.name)
source_file_handle_assocation = {
'fileHandleId': fhid,
'associateObjectId': entity.id,
'associateObjectType': 'FileEntity',
}
dest_storage_location = storage_location_setting['storageLocationId']
copied_fhid = multipart_copy(
syn,
source_file_handle_assocation,
dest_file_name,
part_size,
dest_storage_location,
)
copied_file_handle = syn._get_file_handle_as_creator(copied_fhid)
dest_file = File(
name=dest_file_name,
parent=dest_folder,
)
dest_file['dataFileHandleId'] = copied_fhid
dest_file['_file_handle'] = copied_file_handle
dest_file_entity = syn.store(dest_file)
dest_file_local = syn.get(dest_file_entity.id)
with open(dest_file_local.path, 'r') as dest_file_in:
dest_file_content = dest_file_in.read()
assert file_content == dest_file_content
def test_multipart_copy(syn, project, schedule_for_cleanup):
"""Test multi part copy using the minimum part size."""
_multipart_copy_test(syn, project, schedule_for_cleanup, MIN_PART_SIZE)
@skip("Skip in normal testing because the large size makes it slow")
def test_multipart_copy__big_parts(syn, project, schedule_for_cleanup):
_multipart_copy_test(syn, project, schedule_for_cleanup, 100 * utils.MB)
|
apache-2.0
|
bplancher/odoo
|
openerp/addons/base/ir/ir_sequence.py
|
22
|
18267
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import pytz
import time
from datetime import datetime, timedelta
from openerp import _, api, fields, models
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
def _create_sequence(cr, seq_name, number_increment, number_next):
""" Create a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise UserError(_('Step must not be zero.'))
sql = "CREATE SEQUENCE %s INCREMENT BY %%s START WITH %%s" % seq_name
cr.execute(sql, (number_increment, number_next))
def _drop_sequence(cr, seq_names):
""" Drop the PostreSQL sequence if it exists.
There is no access rights check.
"""
names = []
for n in seq_names:
names.append(n)
names = ','.join(names)
# RESTRICT is the default; it prevents dropping the sequence if an
# object depends on it.
cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
def _alter_sequence(cr, seq_name, number_increment=None, number_next=None):
""" Alter a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise UserError(_("Step must not be zero."))
cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
if not cr.fetchone():
# sequence is not created yet, we're inside create() so ignore it, will be set later
return
statement = "ALTER SEQUENCE %s" % (seq_name, )
if number_increment is not None:
statement += " INCREMENT BY %d" % (number_increment, )
if number_next is not None:
statement += " RESTART WITH %d" % (number_next, )
cr.execute(statement)
def _select_nextval(cr, seq_name):
cr.execute("SELECT nextval('%s')" % seq_name)
return cr.fetchone()
def _update_nogap(self, number_increment):
number_next = self.number_next
self.env.cr.execute("SELECT number_next FROM %s WHERE id=%s FOR UPDATE NOWAIT" % (self._table, self.id))
self.env.cr.execute("UPDATE %s SET number_next=number_next+%s WHERE id=%s " % (self._table, number_increment, self.id))
self.invalidate_cache(['number_next'], [self.id])
return number_next
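# Illustrative note (not part of the original module), sketching what the two
# statements above issue for a hypothetical ir.sequence record with id=7 and a
# step of 1:
#   SELECT number_next FROM ir_sequence WHERE id=7 FOR UPDATE NOWAIT
#   UPDATE ir_sequence SET number_next=number_next+1 WHERE id=7
# The row-level lock is what makes the 'no_gap' implementation gapless, at the
# cost of serializing concurrent transactions on that sequence row.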
class ir_sequence(models.Model):
""" Sequence model.
The sequence model allows to define and use so-called sequence objects.
Such objects are used to generate unique identifiers in a transaction-safe
way.
"""
_name = 'ir.sequence'
_order = 'name'
def _get_number_next_actual(self):
        '''Return the number from the ir_sequence row for the no_gap implementation,
        and the number from the PostgreSQL sequence for the standard implementation.'''
for element in self:
if element.implementation != 'standard':
element.number_next_actual = element.number_next
else:
# get number from postgres sequence. Cannot use currval, because that might give an error when
# not having used nextval before.
query = "SELECT last_value, increment_by, is_called FROM ir_sequence_%03d" % element.id
self.env.cr.execute(query)
(last_value, increment_by, is_called) = self.env.cr.fetchone()
if is_called:
element.number_next_actual = last_value + increment_by
else:
element.number_next_actual = last_value
def _set_number_next_actual(self):
for record in self:
record.write({'number_next': record.number_next_actual or 0})
name = fields.Char(required=True)
code = fields.Char('Sequence Code')
implementation = fields.Selection(
[('standard', 'Standard'), ('no_gap', 'No gap')],
'Implementation', required=True, default='standard',
help="Two sequence object implementations are offered: Standard "
"and 'No gap'. The later is slower than the former but forbids any"
" gap in the sequence (while they are possible in the former).")
active = fields.Boolean(default=True)
prefix = fields.Char(help="Prefix value of the record for the sequence")
suffix = fields.Char(help="Suffix value of the record for the sequence")
number_next = fields.Integer('Next Number', required=True, default=1, help="Next number of this sequence")
number_next_actual = fields.Integer(compute='_get_number_next_actual', inverse='_set_number_next_actual',
required=True, string='Next Number', default=1,
help="Next number that will be used. This number can be incremented "
"frequently so the displayed value might already be obsolete")
number_increment = fields.Integer('Step', required=True, default=1,
help="The next number of the sequence will be incremented by this number")
padding = fields.Integer('Sequence Size', required=True, default=0,
help="Odoo will automatically adds some '0' on the left of the "
"'Next Number' to get the required padding size.")
company_id = fields.Many2one('res.company', 'Company',
default=lambda s: s.env['res.company']._company_default_get('ir.sequence'))
use_date_range = fields.Boolean('Use subsequences per date_range')
date_range_ids = fields.One2many('ir.sequence.date_range', 'sequence_id', 'Subsequences')
def init(self, cr):
return # Don't do the following index yet.
# CONSTRAINT/UNIQUE INDEX on (code, company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
# only support field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate (code, NULL) ir_sequences.
self.env.cr.execute("""
SELECT indexname FROM pg_indexes WHERE indexname =
'ir_sequence_unique_code_company_id_idx'""")
if not self.env.cr.fetchone():
self.env.cr.execute("""
CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
ON ir_sequence (code, (COALESCE(company_id,-1)))""")
@api.model
def create(self, values):
""" Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
"""
seq = super(ir_sequence, self).create(values)
if values.get('implementation', 'standard') == 'standard':
_create_sequence(self.env.cr, "ir_sequence_%03d" % seq.id, values.get('number_increment', 1), values.get('number_next', 1))
return seq
@api.multi
def unlink(self):
_drop_sequence(self.env.cr, ["ir_sequence_%03d" % x.id for x in self])
return super(ir_sequence, self).unlink()
@api.multi
def write(self, values):
new_implementation = values.get('implementation')
for seq in self:
# 4 cases: we test the previous impl. against the new one.
i = values.get('number_increment', seq.number_increment)
n = values.get('number_next', seq.number_next)
if seq.implementation == 'standard':
if new_implementation in ('standard', None):
# Implementation has NOT changed.
# Only change sequence if really requested.
if values.get('number_next'):
_alter_sequence(self.env.cr, "ir_sequence_%03d" % seq.id, number_next=n)
if seq.number_increment != i:
_alter_sequence(self.env.cr, "ir_sequence_%03d" % seq.id, number_increment=i)
seq.date_range_ids._alter_sequence(number_increment=i)
else:
_drop_sequence(self.env.cr, ["ir_sequence_%03d" % seq.id])
for sub_seq in seq.date_range_ids:
_drop_sequence(self.env.cr, ["ir_sequence_%03d_%03d" % (seq.id, sub_seq.id)])
else:
if new_implementation in ('no_gap', None):
pass
else:
_create_sequence(self.env.cr, "ir_sequence_%03d" % seq.id, i, n)
for sub_seq in seq.date_range_ids:
_create_sequence(self.env.cr, "ir_sequence_%03d_%03d" % (seq.id, sub_seq.id), i, n)
return super(ir_sequence, self).write(values)
def _next_do(self):
if self.implementation == 'standard':
number_next = _select_nextval(self.env.cr, 'ir_sequence_%03d' % self.id)
else:
number_next = _update_nogap(self, self.number_increment)
return self.get_next_char(number_next)
def get_next_char(self, number_next):
def _interpolate(s, d):
if s:
return s % d
return ''
def _interpolation_dict():
now = range_date = effective_date = datetime.now(pytz.timezone(self.env.context.get('tz') or 'UTC'))
if self.env.context.get('ir_sequence_date'):
effective_date = datetime.strptime(self.env.context.get('ir_sequence_date'), '%Y-%m-%d')
if self.env.context.get('ir_sequence_date_range'):
range_date = datetime.strptime(self.env.context.get('ir_sequence_date_range'), '%Y-%m-%d')
sequences = {
'year': '%Y', 'month': '%m', 'day': '%d', 'y': '%y', 'doy': '%j', 'woy': '%W',
'weekday': '%w', 'h24': '%H', 'h12': '%I', 'min': '%M', 'sec': '%S'
}
res = {}
for key, sequence in sequences.iteritems():
res[key] = effective_date.strftime(sequence)
res['range_' + key] = range_date.strftime(sequence)
res['current_' + key] = now.strftime(sequence)
return res
d = _interpolation_dict()
try:
interpolated_prefix = _interpolate(self.prefix, d)
interpolated_suffix = _interpolate(self.suffix, d)
except ValueError:
            raise UserError(_('Invalid prefix or suffix for sequence \'%s\'') % (self.name))
return interpolated_prefix + '%%0%sd' % self.padding % number_next + interpolated_suffix
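    # Illustrative note (not part of the original module): assuming a sequence
    # with prefix 'INV/%(year)s/', an empty suffix, padding=5 and number_next=42,
    # the interpolation above would yield something like 'INV/2016/00042'
    # (the year depends on the effective date taken from the context or now()).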
def _create_date_range_seq(self, date):
year = fields.Date.from_string(date).strftime('%Y')
date_from = '{}-01-01'.format(year)
date_to = '{}-12-31'.format(year)
date_range = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_from', '>=', date), ('date_from', '<=', date_to)], order='date_from desc')
if date_range:
date_to = datetime.strptime(date_range.date_from, '%Y-%m-%d') + timedelta(days=-1)
date_to = date_to.strftime('%Y-%m-%d')
date_range = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_to', '>=', date_from), ('date_to', '<=', date)], order='date_to desc')
if date_range:
date_from = datetime.strptime(date_range.date_to, '%Y-%m-%d') + timedelta(days=1)
date_from = date_from.strftime('%Y-%m-%d')
seq_date_range = self.env['ir.sequence.date_range'].sudo().create({
'date_from': date_from,
'date_to': date_to,
'sequence_id': self.id,
})
return seq_date_range
def _next(self):
""" Returns the next number in the preferred sequence in all the ones given in self."""
if not self.use_date_range:
return self._next_do()
# date mode
dt = fields.Date.today()
if self.env.context.get('ir_sequence_date'):
dt = self.env.context.get('ir_sequence_date')
seq_date = self.env['ir.sequence.date_range'].search([('sequence_id', '=', self.id), ('date_from', '<=', dt), ('date_to', '>=', dt)], limit=1)
if not seq_date:
seq_date = self._create_date_range_seq(dt)
return seq_date.with_context(ir_sequence_date_range=seq_date.date_from)._next()
@api.multi
def next_by_id(self):
""" Draw an interpolated string using the specified sequence."""
self.check_access_rights('read')
return self._next()
@api.model
def next_by_code(self, sequence_code):
""" Draw an interpolated string using a sequence with the requested code.
If several sequences with the correct code are available to the user
(multi-company cases), the one from the user's current company will
be used.
:param dict context: context dictionary may contain a
``force_company`` key with the ID of the company to
use instead of the user's current company for the
sequence selection. A matching sequence for that
specific company will get higher priority.
"""
self.check_access_rights('read')
company_ids = self.env['res.company'].search([]).ids + [False]
seq_ids = self.search(['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)])
if not seq_ids:
return False
force_company = self.env.context.get('force_company')
if not force_company:
force_company = self.env.user.company_id.id
preferred_sequences = [s for s in seq_ids if s.company_id and s.company_id.id == force_company]
seq_id = preferred_sequences[0] if preferred_sequences else seq_ids[0]
return seq_id._next()
@api.model
def get_id(self, sequence_code_or_id, code_or_id='id'):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by the ``sequence_code_or_id``
argument, which can be a code or an id (as controlled by the
``code_or_id`` argument. This method is deprecated.
"""
_logger.warning("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
"Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
if code_or_id == 'id':
return self.browse(sequence_code_or_id).next_by_id()
else:
return self.next_by_code(sequence_code_or_id)
@api.model
def get(self, code):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by its code. This method is
deprecated.
"""
return self.get_id(code, 'code')
class ir_sequence_date_range(models.Model):
_name = 'ir.sequence.date_range'
_rec_name = "sequence_id"
def _get_number_next_actual(self):
        '''Return the number from the ir_sequence row for the no_gap implementation,
        and the number from the PostgreSQL sequence for the standard implementation.'''
for element in self:
if element.sequence_id.implementation != 'standard':
element.number_next_actual = element.number_next
else:
# get number from postgres sequence. Cannot use currval, because that might give an error when
# not having used nextval before.
query = "SELECT last_value, increment_by, is_called FROM ir_sequence_%03d_%03d" % (element.sequence_id.id, element.id)
self.env.cr.execute(query)
(last_value, increment_by, is_called) = self.env.cr.fetchone()
if is_called:
element.number_next_actual = last_value + increment_by
else:
element.number_next_actual = last_value
def _set_number_next_actual(self):
for record in self:
record.write({'number_next': record.number_next_actual or 0})
date_from = fields.Date('From', required=True)
date_to = fields.Date('To', required=True)
sequence_id = fields.Many2one("ir.sequence", 'Main Sequence', required=True, ondelete='cascade')
number_next = fields.Integer('Next Number', required=True, default=1, help="Next number of this sequence")
number_next_actual = fields.Integer(compute='_get_number_next_actual', inverse='_set_number_next_actual',
required=True, string='Next Number', default=1,
help="Next number that will be used. This number can be incremented "
"frequently so the displayed value might already be obsolete")
def _next(self):
if self.sequence_id.implementation == 'standard':
number_next = _select_nextval(self.env.cr, 'ir_sequence_%03d_%03d' % (self.sequence_id.id, self.id))
else:
number_next = _update_nogap(self, self.sequence_id.number_increment)
return self.sequence_id.get_next_char(number_next)
@api.multi
def _alter_sequence(self, number_increment=None, number_next=None):
for seq in self:
_alter_sequence(self.env.cr, "ir_sequence_%03d_%03d" % (seq.sequence_id.id, seq.id), number_increment=number_increment, number_next=number_next)
@api.model
def create(self, values):
""" Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
"""
seq = super(ir_sequence_date_range, self).create(values)
main_seq = seq.sequence_id
if main_seq.implementation == 'standard':
_create_sequence(self.env.cr, "ir_sequence_%03d_%03d" % (main_seq.id, seq.id), main_seq.number_increment, values.get('number_next_actual', 1))
return seq
@api.multi
def unlink(self):
_drop_sequence(self.env.cr, ["ir_sequence_%03d_%03d" % (x.sequence_id.id, x.id) for x in self])
return super(ir_sequence_date_range, self).unlink()
@api.multi
def write(self, values):
if values.get('number_next'):
seq_to_alter = self.filtered(lambda seq: seq.sequence_id.implementation == 'standard')
seq_to_alter._alter_sequence(number_next=values.get('number_next'))
return super(ir_sequence_date_range, self).write(values)
|
agpl-3.0
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/lang/objc/modules/TestObjCModules.py
|
1
|
2652
|
"""Test that importing modules in Objective-C works as expected."""
import unittest2
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ObjCModulesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.m', '// Set breakpoint 0 here.')
@skipUnlessDarwin
@skipIf(macos_version=["<", "10.12"])
@skipIfReproducer # Unexpected packet during replay
def test_expr(self):
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
lldbutil.run_break_set_by_file_and_line(
self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
self.expect("expr @import Darwin; 3", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["int", "3"])
self.expect("expr getpid()", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["pid_t"])
self.expect(
"expr @import Foundation; 4",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"int",
"4"])
# Type lookup should still work and print something reasonable
# for types from the module.
self.expect("type lookup NSObject", substrs=["instanceMethod"])
self.expect("expr string.length", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["NSUInteger", "5"])
self.expect("expr array.count", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["NSUInteger", "3"])
self.expect(
"p *[NSURL URLWithString:@\"http://lldb.llvm.org\"]",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"NSURL",
"isa",
"_urlString"])
self.expect(
"p [NSURL URLWithString:@\"http://lldb.llvm.org\"].scheme",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["http"])
|
bsd-3-clause
|
jetty840/ReplicatorG
|
skein_engines/skeinforge-35/fabmetheus_utilities/geometry/creation/line.py
|
6
|
5010
|
"""
Square path.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def getGeometryOutput(derivation, xmlElement):
"Get vector3 vertexes from attribute dictionary."
if derivation == None:
derivation = LineDerivation()
derivation.setToXMLElement(xmlElement)
endMinusStart = derivation.end - derivation.start
endMinusStartLength = abs(endMinusStart)
if endMinusStartLength <= 0.0:
print('Warning, end is the same as start in getGeometryOutput in line for:')
print(derivation.start)
print(derivation.end)
print(xmlElement)
return None
typeStringTwoCharacters = derivation.typeString.lower()[: 2]
xmlElement.attributeDictionary['closed'] = str(derivation.closed)
if derivation.step == None and derivation.steps == None:
return lineation.getGeometryOutputByLoop(lineation.SideLoop([derivation.start, derivation.end]), xmlElement)
loop = [derivation.start]
if derivation.step != None and derivation.steps != None:
stepVector = derivation.step / endMinusStartLength * endMinusStart
derivation.end = derivation.start + stepVector * derivation.steps
return getGeometryOutputByStep(derivation.end, loop, derivation.steps, stepVector, xmlElement)
if derivation.step == None:
stepVector = endMinusStart / derivation.steps
return getGeometryOutputByStep(derivation.end, loop, derivation.steps, stepVector, xmlElement)
endMinusStartLengthOverStep = endMinusStartLength / derivation.step
if typeStringTwoCharacters == 'av':
derivation.steps = max(1.0, round(endMinusStartLengthOverStep))
stepVector = derivation.step / endMinusStartLength * endMinusStart
derivation.end = derivation.start + stepVector * derivation.steps
return getGeometryOutputByStep(derivation.end, loop, derivation.steps, stepVector, xmlElement)
if typeStringTwoCharacters == 'ma':
derivation.steps = math.ceil(endMinusStartLengthOverStep)
if derivation.steps < 1.0:
return lineation.getGeometryOutputByLoop(lineation.SideLoop([derivation.start, derivation.end]), xmlElement)
stepVector = endMinusStart / derivation.steps
return getGeometryOutputByStep(derivation.end, loop, derivation.steps, stepVector, xmlElement)
if typeStringTwoCharacters == 'mi':
derivation.steps = math.floor(endMinusStartLengthOverStep)
if derivation.steps < 1.0:
return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop), xmlElement)
stepVector = endMinusStart / derivation.steps
return getGeometryOutputByStep(derivation.end, loop, derivation.steps, stepVector, xmlElement)
print('Warning, the step type was not one of (average, maximum or minimum) in getGeometryOutput in line for:')
print(derivation.typeString)
print(xmlElement)
loop.append(derivation.end)
return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop), xmlElement)
def getGeometryOutputByArguments(arguments, xmlElement):
"Get vector3 vertexes from attribute dictionary by arguments."
evaluate.setAttributeDictionaryByArguments(['start', 'end', 'step'], arguments, xmlElement)
return getGeometryOutput(None, xmlElement)
def getGeometryOutputByStep(end, loop, steps, stepVector, xmlElement):
"Get line geometry output by the end, loop, steps and stepVector."
stepsFloor = int(math.floor(abs(steps)))
for stepIndex in xrange(1, stepsFloor):
loop.append(loop[stepIndex - 1] + stepVector)
loop.append(end)
return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop), xmlElement)
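# Illustrative note (not part of the original module): with loop == [start],
# steps == 3.0 and stepVector == (end - start) / 3, the code above builds
# [start, start + stepVector, start + 2 * stepVector, end], i.e. the segment
# from start to end subdivided into three equal steps.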
def processXMLElement(xmlElement):
"Process the xml element."
lineation.processXMLElementByGeometry(getGeometryOutput(None, xmlElement), xmlElement)
class LineDerivation:
"Class to hold line variables."
def __init__(self):
'Set defaults.'
self.closed = False
self.end = Vector3()
self.step = None
self.steps = None
self.start = Vector3()
self.typeString = 'minimum'
def __repr__(self):
"Get the string representation of this LineDerivation."
return str(self.__dict__)
def setToXMLElement(self, xmlElement):
"Set to the xmlElement."
self.closed = evaluate.getEvaluatedBooleanDefault(False, 'closed', xmlElement)
self.end = evaluate.getVector3ByPrefix(self.end, 'end', xmlElement)
self.start = evaluate.getVector3ByPrefix(self.start, 'start', xmlElement)
self.step = evaluate.getEvaluatedFloatDefault(self.step, 'step', xmlElement)
self.steps = evaluate.getEvaluatedFloatDefault(self.steps, 'steps', xmlElement)
self.typeString = evaluate.getEvaluatedStringDefault(self.typeString, 'type', xmlElement)
|
gpl-2.0
|
omtinez/micropython
|
examples/pyb.py
|
99
|
1502
|
# pyboard testing functions for CPython
import time
def delay(n):
#time.sleep(float(n) / 1000)
pass
rand_seed = 1
def rng():
global rand_seed
    # for this choice of numbers, see P. L'Ecuyer, "Tables of linear congruential generators of different sizes and good lattice structure"
rand_seed = (rand_seed * 653276) % 8388593
return rand_seed
# LCD testing object for PC
# uses double buffering
class LCD:
def __init__(self, port):
self.width = 128
self.height = 32
self.buf1 = [[0 for x in range(self.width)] for y in range(self.height)]
self.buf2 = [[0 for x in range(self.width)] for y in range(self.height)]
def light(self, value):
pass
def fill(self, value):
for y in range(self.height):
for x in range(self.width):
self.buf1[y][x] = self.buf2[y][x] = value
def show(self):
print('') # blank line to separate frames
for y in range(self.height):
for x in range(self.width):
self.buf1[y][x] = self.buf2[y][x]
for y in range(self.height):
row = ''.join(['*' if self.buf1[y][x] else ' ' for x in range(self.width)])
print(row)
def get(self, x, y):
if 0 <= x < self.width and 0 <= y < self.height:
return self.buf1[y][x]
else:
return 0
def pixel(self, x, y, value):
if 0 <= x < self.width and 0 <= y < self.height:
self.buf2[y][x] = value
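# Illustrative usage sketch (not part of the original example); the port name
# 'X' is an arbitrary placeholder since this test LCD ignores it:
#   lcd = LCD('X')
#   lcd.fill(0)
#   lcd.pixel(10, 20, 1)
#   lcd.show()    # prints a 128x32 grid of ' '/'*' with the set pixel marked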
|
mit
|
CyanideL/android_kernel_samsung_klte
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
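# Illustrative note (not part of the original script): a function-tracer line
# shaped roughly like
#   "          bash-2794  [002]  1453.121114: vfs_read <-sys_read"
# is parsed by the regexp above into ('1453.121114', 'vfs_read', 'sys_read'),
# i.e. (calltime, callee, caller) as consumed by main() below.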
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
DreamStar001/Vegito_vibe_k5
|
tools/perf/scripts/python/net_dropmonitor.py
|
2669
|
1738
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
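# Illustrative note (not part of the original script): with a hypothetical
# table kallsyms == [(100, 'foo'), (200, 'bar')], get_sym("150") returns
# ('foo', 50) -- the closest preceding symbol plus the offset into it --
# while get_sym("50") returns (None, 0) since no symbol starts at or below 50.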
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
gpl-2.0
|
Distrotech/intellij-community
|
python/lib/Lib/encodings/cp424.py
|
593
|
12311
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp424',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
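# Usage note (illustrative, not part of the generated file): once the codec is
# registered through the standard encodings machinery, the tables below imply
#   '\x41'.decode('cp424') == u'\u05d0'   # HEBREW LETTER ALEF
#   u'\u05d0'.encode('cp424') == '\x41'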
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> SELECT
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> REQUIRED NEW LINE
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> GRAPHIC ESCAPE
u'\x8d' # 0x09 -> SUPERSCRIPT
u'\x8e' # 0x0A -> REPEAT
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
u'\x85' # 0x15 -> NEW LINE
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> UNIT BACK SPACE
u'\x8f' # 0x1B -> CUSTOMER USE ONE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> DIGIT SELECT
u'\x81' # 0x21 -> START OF SIGNIFICANCE
u'\x82' # 0x22 -> FIELD SEPARATOR
u'\x83' # 0x23 -> WORD UNDERSCORE
u'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> SET ATTRIBUTE
u'\x89' # 0x29 -> START FIELD EXTENDED
u'\x8a' # 0x2A -> SET MODE OR SWITCH
u'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
u'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> <reserved>
u'\x91' # 0x31 -> <reserved>
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> INDEX RETURN
u'\x94' # 0x34 -> PRESENTATION POSITION
u'\x95' # 0x35 -> TRANSPARENT
u'\x96' # 0x36 -> NUMERIC BACKSPACE
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> SUBSCRIPT
u'\x99' # 0x39 -> INDENT TABULATION
u'\x9a' # 0x3A -> REVERSE FORM FEED
u'\x9b' # 0x3B -> CUSTOMER USE THREE
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> <reserved>
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u05d0' # 0x41 -> HEBREW LETTER ALEF
u'\u05d1' # 0x42 -> HEBREW LETTER BET
u'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x44 -> HEBREW LETTER DALET
u'\u05d4' # 0x45 -> HEBREW LETTER HE
u'\u05d5' # 0x46 -> HEBREW LETTER VAV
u'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x48 -> HEBREW LETTER HET
u'\u05d8' # 0x49 -> HEBREW LETTER TET
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\u05d9' # 0x51 -> HEBREW LETTER YOD
u'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x53 -> HEBREW LETTER KAF
u'\u05dc' # 0x54 -> HEBREW LETTER LAMED
u'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x56 -> HEBREW LETTER MEM
u'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x58 -> HEBREW LETTER NUN
u'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u05e2' # 0x62 -> HEBREW LETTER AYIN
u'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x64 -> HEBREW LETTER PE
u'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x66 -> HEBREW LETTER TSADI
u'\u05e7' # 0x67 -> HEBREW LETTER QOF
u'\u05e8' # 0x68 -> HEBREW LETTER RESH
u'\u05e9' # 0x69 -> HEBREW LETTER SHIN
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\ufffe' # 0x70 -> UNDEFINED
u'\u05ea' # 0x71 -> HEBREW LETTER TAV
u'\ufffe' # 0x72 -> UNDEFINED
u'\ufffe' # 0x73 -> UNDEFINED
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\ufffe' # 0x75 -> UNDEFINED
u'\ufffe' # 0x76 -> UNDEFINED
u'\ufffe' # 0x77 -> UNDEFINED
u'\u2017' # 0x78 -> DOUBLE LOW LINE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\ufffe' # 0x80 -> UNDEFINED
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\xb8' # 0x9D -> CEDILLA
u'\ufffe' # 0x9E -> UNDEFINED
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\ufffe' # 0xAA -> UNDEFINED
u'\ufffe' # 0xAB -> UNDEFINED
u'\ufffe' # 0xAC -> UNDEFINED
u'\ufffe' # 0xAD -> UNDEFINED
u'\ufffe' # 0xAE -> UNDEFINED
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\ufffe' # 0xCB -> UNDEFINED
u'\ufffe' # 0xCC -> UNDEFINED
u'\ufffe' # 0xCD -> UNDEFINED
u'\ufffe' # 0xCE -> UNDEFINED
u'\ufffe' # 0xCF -> UNDEFINED
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\ufffe' # 0xEE -> UNDEFINED
u'\ufffe' # 0xEF -> UNDEFINED
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
apache-2.0
|
shitolepriya/test-frappe
|
frappe/share.py
|
5
|
3602
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
@frappe.whitelist()
def add(doctype, name, user=None, read=1, write=0, share=0, everyone=0, flags=None):
"""Share the given document with a user."""
if not user:
user = frappe.session.user
share_name = get_share_name(doctype, name, user, everyone)
if share_name:
doc = frappe.get_doc("DocShare", share_name)
else:
doc = frappe.new_doc("DocShare")
doc.update({
"user": user,
"share_doctype": doctype,
"share_name": name,
"everyone": cint(everyone)
})
if flags:
doc.flags.update(flags)
doc.update({
# always add read, since you are adding!
"read": 1,
"write": cint(write),
"share": cint(share)
})
doc.save(ignore_permissions=True)
return doc
def remove(doctype, name, user, flags=None):
share_name = frappe.db.get_value("DocShare", {"user": user, "share_name": name,
"share_doctype": doctype})
if share_name:
frappe.delete_doc("DocShare", share_name)
@frappe.whitelist()
def set_permission(doctype, name, user, permission_to, value=1, everyone=0):
"""Set share permission."""
share_name = get_share_name(doctype, name, user, everyone)
value = int(value)
if not share_name:
if value:
share = add(doctype, name, user, everyone=everyone, **{permission_to: 1})
else:
# no share found, nothing to remove
share = {}
else:
share = frappe.get_doc("DocShare", share_name)
share.flags.ignore_permissions = True
share.set(permission_to, value)
if not value:
# un-set higher-order permissions too
if permission_to=="read":
share.read = share.write = share.share = 0
elif permission_to=="write":
share.write = share.share = 0
share.save()
if not (share.read or share.write or share.share):
share.delete()
share = {}
return share
@frappe.whitelist()
def get_users(doctype, name, fields="*"):
"""Get list of users with which this document is shared"""
if isinstance(fields, (tuple, list)):
fields = "`{0}`".format("`, `".join(fields))
return frappe.db.sql("select {0} from tabDocShare where share_doctype=%s and share_name=%s".format(fields),
(doctype, name), as_dict=True)
def get_shared(doctype, user=None, rights=None):
"""Get list of shared document names for given user and DocType.
:param doctype: DocType of which shared names are queried.
:param user: User for which shared names are queried.
:param rights: List of rights for which the document is shared. List of `read`, `write`, `share`"""
if not user:
user = frappe.session.user
if not rights:
rights = ["read"]
condition = " and ".join(["`{0}`=1".format(right) for right in rights])
return frappe.db.sql_list("""select share_name from tabDocShare
where (user=%s {everyone}) and share_doctype=%s and {condition}""".format(
condition=condition, everyone="or everyone=1" if user!="Guest" else ""),
(user, doctype))
def get_shared_doctypes(user=None):
"""Return list of doctypes in which documents are shared for the given user."""
if not user:
user = frappe.session.user
return frappe.db.sql_list("select distinct share_doctype from tabDocShare where (user=%s or everyone=1)", user)
def get_share_name(doctype, name, user, everyone):
if cint(everyone):
share_name = frappe.db.get_value("DocShare", {"everyone": 1, "share_name": name,
"share_doctype": doctype})
else:
share_name = frappe.db.get_value("DocShare", {"user": user, "share_name": name,
"share_doctype": doctype})
return share_name
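# Illustrative usage sketch (not part of the original module); the DocType
# "ToDo", the name "TD00001" and the user address are hypothetical values:
#   add("ToDo", "TD00001", user="test@example.com", write=1)
#   set_permission("ToDo", "TD00001", "test@example.com", "share", value=1)
#   get_shared("ToDo", user="test@example.com", rights=["read", "write"])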
|
mit
|
kramble/FPGA-Litecoin-Miner
|
ICARUS-LX150/MiningSoftware/pyserial-2.6/test/test.py
|
17
|
7824
|
#! /usr/bin/env python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2008 Chris Liechti <[email protected]>
# this is distributed under a free software license, see license.txt
"""\
Some tests for the serial module.
Part of pyserial (http://pyserial.sf.net) (C)2001-2009 [email protected]
Intended to be run on different platforms, to ensure portability of
the code.
For all these tests, simple hardware is required.
Loopback HW adapter:
Shortcut these pin pairs:
TX <-> RX
RTS <-> CTS
DTR <-> DSR
On a 9 pole DSUB these are the pins (2-3) (4-6) (7-8)
"""
import unittest
import threading
import time
import sys
import serial
# on which port should the tests be performed:
PORT = 0
if sys.version_info >= (3, 0):
def data(string):
return bytes(string, 'latin1')
bytes_0to255 = bytes(range(256))
else:
def data(string): return string
bytes_0to255 = ''.join([chr(x) for x in range(256)])
def segments(data, size=16):
for a in range(0, len(data), size):
yield data[a:a+size]
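# Illustrative note (not part of the original tests): segments() just yields
# consecutive slices, e.g. list(segments(bytes_0to255, 16)) is a list of
# sixteen 16-byte blocks covering the whole 0..255 range.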
class Test4_Nonblocking(unittest.TestCase):
"""Test with timeouts"""
timeout = 0
def setUp(self):
self.s = serial.serial_for_url(PORT, timeout=self.timeout)
def tearDown(self):
self.s.close()
def test0_Messy(self):
"""NonBlocking (timeout=0)"""
# this is only here to write out the message in verbose mode
# because Test3 and Test4 print the same messages
def test1_ReadEmpty(self):
"""timeout: After port open, the input buffer must be empty"""
self.failUnlessEqual(self.s.read(1), data(''), "expected empty buffer")
def test2_Loopback(self):
"""timeout: each sent character should return (binary test).
this is also a test for the binary capability of a port."""
for block in segments(bytes_0to255):
length = len(block)
self.s.write(block)
# there might be a small delay until the character is ready (especially on win32)
time.sleep(0.05)
self.failUnlessEqual(self.s.inWaiting(), length, "expected exactly %d character for inWainting()" % length)
self.failUnlessEqual(self.s.read(length), block)#, "expected a %r which was written before" % block)
self.failUnlessEqual(self.s.read(1), data(''), "expected empty buffer after all sent chars are read")
def test2_LoopbackTimeout(self):
"""timeout: test the timeout/immediate return.
partial results should be returned."""
self.s.write(data("HELLO"))
time.sleep(0.1) # there might be a small delay until the character is ready (especially on win32 and rfc2217)
# read more characters as are available to run in the timeout
self.failUnlessEqual(self.s.read(10), data('HELLO'), "expected the 'HELLO' which was written before")
self.failUnlessEqual(self.s.read(1), data(''), "expected empty buffer after all sent chars are read")
class Test3_Timeout(Test4_Nonblocking):
"""Same tests as the NonBlocking ones but this time with timeout"""
timeout = 1
def test0_Messy(self):
"""Blocking (timeout=1)"""
# this is only here to write out the message in verbose mode
# because Test3 and Test4 print the same messages
class SendEvent(threading.Thread):
def __init__(self, serial, delay=3):
threading.Thread.__init__(self)
self.serial = serial
self.delay = delay
self.x = threading.Event()
self.stopped = 0
self.start()
def run(self):
time.sleep(self.delay)
self.x.set()
if not self.stopped:
self.serial.write(data("E"))
self.serial.flush()
def isSet(self):
return self.x.isSet()
def stop(self):
self.stopped = 1
self.x.wait()
class Test1_Forever(unittest.TestCase):
"""Tests a port with no timeout. These tests require that a
character is sent after some time to stop the test, this is done
through the SendEvent class and the Loopback HW."""
def setUp(self):
self.s = serial.serial_for_url(PORT, timeout=None)
self.event = SendEvent(self.s)
def tearDown(self):
self.event.stop()
self.s.close()
def test2_ReadEmpty(self):
"""no timeout: after port open, the input buffer must be empty (read).
a character is sent after some time to terminate the test (SendEvent)."""
c = self.s.read(1)
if not (self.event.isSet() and c == data('E')):
self.fail("expected marker (evt=%r, c=%r)" % (self.event.isSet(), c))
class Test2_Forever(unittest.TestCase):
"""Tests a port with no timeout"""
def setUp(self):
self.s = serial.serial_for_url(PORT, timeout=None)
def tearDown(self):
self.s.close()
def test1_inWaitingEmpty(self):
"""no timeout: after port open, the input buffer must be empty (inWaiting)"""
self.failUnlessEqual(self.s.inWaiting(), 0, "expected empty buffer")
def test2_Loopback(self):
"""no timeout: each sent character should return (binary test).
this is also a test for the binary capability of a port."""
for block in segments(bytes_0to255):
length = len(block)
self.s.write(block)
# there might be a small delay until the character is ready (especially on win32 and rfc2217)
time.sleep(0.05)
            self.failUnlessEqual(self.s.inWaiting(), length)#, "expected exactly %d characters for inWaiting()" % length)
self.failUnlessEqual(self.s.read(length), block) #, "expected %r which was written before" % block)
self.failUnlessEqual(self.s.inWaiting(), 0, "expected empty buffer after all sent chars are read")
class Test0_DataWires(unittest.TestCase):
"""Test modem control lines"""
def setUp(self):
self.s = serial.serial_for_url(PORT)
def tearDown(self):
self.s.close()
def test1_RTS(self):
"""Test RTS/CTS"""
self.s.setRTS(0)
time.sleep(1.1)
self.failUnless(not self.s.getCTS(), "CTS -> 0")
self.s.setRTS(1)
time.sleep(1.1)
self.failUnless(self.s.getCTS(), "CTS -> 1")
def test2_DTR(self):
"""Test DTR/DSR"""
self.s.setDTR(0)
time.sleep(1.1)
self.failUnless(not self.s.getDSR(), "DSR -> 0")
self.s.setDTR(1)
time.sleep(1.1)
self.failUnless(self.s.getDSR(), "DSR -> 1")
def test3_RI(self):
"""Test RI"""
self.failUnless(not self.s.getRI(), "RI -> 0")
class Test_MoreTimeouts(unittest.TestCase):
"""Test with timeouts"""
def setUp(self):
# create an closed serial port
self.s = serial.serial_for_url(PORT, do_not_open=True)
def tearDown(self):
self.s.close()
def test_WriteTimeout(self):
"""Test write() timeout."""
# use xonxoff setting and the loop-back adapter to switch traffic on hold
self.s.port = PORT
self.s.writeTimeout = 1
self.s.xonxoff = 1
self.s.open()
self.s.write(serial.XOFF)
time.sleep(0.5) # some systems need a little delay so that they can react on XOFF
t1 = time.time()
self.failUnlessRaises(serial.SerialTimeoutException, self.s.write, data("timeout please"*200))
t2 = time.time()
self.failUnless( 0.9 <= (t2-t1) < 2.1, "Timeout not in the given interval (%s)" % (t2-t1))
if __name__ == '__main__':
import sys
sys.stdout.write(__doc__)
if len(sys.argv) > 1:
PORT = sys.argv[1]
sys.stdout.write("Testing port: %r\n" % PORT)
sys.argv[1:] = ['-v']
# When this module is executed from the command-line, it runs all its tests
unittest.main()
|
gpl-3.0
|
heemanshu/swift_juno
|
swift/cli/form_signature.py
|
17
|
5260
|
# Copyright (c) 2010-2012 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating a form signature for use with FormPost middleware.
"""
import hmac
from hashlib import sha1
from os.path import basename
from time import time
def main(argv):
if len(argv) != 7:
prog = basename(argv[0])
print 'Syntax: %s <path> <redirect> <max_file_size> ' \
'<max_file_count> <seconds> <key>' % prog
print
print 'Where:'
print ' <path> The prefix to use for form uploaded'
print ' objects. For example:'
print ' /v1/account/container/object_prefix_ would'
print ' ensure all form uploads have that path'
print ' prepended to the browser-given file name.'
print ' <redirect> The URL to redirect the browser to after'
print ' the uploads have completed.'
print ' <max_file_size> The maximum file size per file uploaded.'
print ' <max_file_count> The maximum number of uploaded files'
print ' allowed.'
print ' <seconds> The number of seconds from now to allow'
print ' the form post to begin.'
print ' <key> The X-Account-Meta-Temp-URL-Key for the'
print ' account.'
print
print 'Example output:'
print ' Expires: 1323842228'
print ' Signature: 18de97e47345a82c4dbfb3b06a640dbb'
print
print 'Sample form:'
print
print('NOTE: the <form> tag\'s "action" attribute does not contain '
'the Swift cluster\'s hostname.')
print 'You should manually add it before using the form.'
print
print('<form action="/v1/a/c/o" method="POST" '
'enctype="multipart/form-data">')
print ' <input type="hidden" name="max_file_size" value="123" />'
print ' ... more HTML ...'
print ' <input type="submit" />'
print '</form>'
return 1
path, redirect, max_file_size, max_file_count, seconds, key = argv[1:]
try:
max_file_size = int(max_file_size)
except ValueError:
max_file_size = -1
if max_file_size < 0:
print 'Please use a <max_file_size> value greater than or equal to 0.'
return 1
try:
max_file_count = int(max_file_count)
except ValueError:
max_file_count = 0
if max_file_count < 1:
print 'Please use a positive <max_file_count> value.'
return 1
try:
expires = int(time() + int(seconds))
except ValueError:
expires = 0
if expires < 1:
print 'Please use a positive <seconds> value.'
return 1
parts = path.split('/', 4)
# Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have
# account and container values, and optionally have an object prefix.
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
print '<path> must point to a container at least.'
print 'For example: /v1/account/container'
print ' Or: /v1/account/container/object_prefix'
return 1
sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
max_file_count, expires),
sha1).hexdigest()
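    # Illustrative note (not part of the original script): with hypothetical
    # values path='/v1/acct/cont/pre_', redirect='', max_file_size=104857600,
    # max_file_count=10 and expires=1323842228, the string being signed above is
    #   '/v1/acct/cont/pre_\n\n104857600\n10\n1323842228'
    # and the printed signature is its HMAC-SHA1 hex digest under the account's
    # X-Account-Meta-Temp-URL-Key.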
print ' Expires:', expires
print 'Signature:', sig
print ''
print('Sample form:\n')
print('NOTE: the <form> tag\'s "action" attribute does not '
'contain the Swift cluster\'s hostname.')
print('You should manually add it before using the form.\n')
print('<form action="%s" method="POST" enctype="multipart/form-data">'
% path)
if redirect:
print(' <input type="hidden" name="redirect" value="%s" />'
% redirect)
print(' <input type="hidden" name="max_file_size" value="%d" />'
% max_file_size)
print(' <input type="hidden" name="max_file_count" value="%d" />'
% max_file_count)
print(' <input type="hidden" name="expires" value="%d" />' % expires)
print(' <input type="hidden" name="signature" value="%s" />' % sig)
print(' <!-- This signature allows for at most %d files, -->'
% max_file_count)
print(' <!-- but it may also have any smaller number. -->')
print(' <!-- Remove file inputs as needed. -->')
for i in range(max_file_count):
print(' <input type="file" name="file%d" />' % i)
print(' <br />')
print(' <input type="submit" />')
print('</form>')
return 0
|
apache-2.0
|
nikhilsaraf/Twitter-Analytics
|
venv/lib/python2.7/site-packages/pip/utils/__init__.py
|
124
|
27531
|
from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
    site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
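# Usage sketch for unpack_file (the paths below are placeholders): pip's
# download code calls it along the lines of
#
#   unpack_file('/tmp/requests-2.0.tar.gz', '/tmp/build/requests',
#               content_type='application/x-gzip', link=None)
#
# where the archive format is picked from the content type, the file
# extension, or zipfile/tarfile sniffing; ``link`` is only consulted for the
# svn-page fallback branch.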
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_level=std_logging.DEBUG, command_desc=None,
extra_environ=None, spinner=None):
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. Even though it is nominally the default, it is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip set show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
if stdout is not None:
all_output = []
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
proc.wait()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return remove_tracebacks(''.join(all_output))
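# A minimal usage sketch of call_subprocess; the command and description are
# placeholders and this helper is not called anywhere in pip itself.
def _call_subprocess_example():
    # With show_stdout=False the child's merged stdout/stderr is captured,
    # stripped of SyntaxError/SyntaxWarning tracebacks and returned.
    return call_subprocess(
        [sys.executable, '-c', 'print("hello from a child process")'],
        show_stdout=False,
        command_desc='python -c print(...)',
    )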
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
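# A short illustration of the descriptor above; the class is made up purely
# for demonstration and is not used anywhere in pip.
class _CachedPropertyExample(object):
    @cached_property
    def answer(self):
        # Computed on first access; the result then replaces this property in
        # the instance __dict__, so the method never runs again.
        return 41 + 1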
def get_installed_version(dist_name):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
|
gpl-3.0
|
PrashntS/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
swinman/pyusb
|
tests/test_util.py
|
8
|
8744
|
# Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
import utils
import unittest
from usb.util import *
from devinfo import *
from usb._debug import methodtrace
import usb.backend
class _ConfigurationDescriptor(object):
def __init__(self, bConfigurationValue):
self.bLength = 9
self.bDescriptorType = DESC_TYPE_CONFIG
self.wTotalLength = 18
self.bNumInterfaces = 0
self.bConfigurationValue = bConfigurationValue
self.iConfiguration = 0
self.bmAttributes = 0xc0
self.bMaxPower = 50
class _DeviceDescriptor(object):
def __init__(self):
self.configurations = (_ConfigurationDescriptor(1), _ConfigurationDescriptor(2))
self.bLength = 18
self.bDescriptorType = usb.util.DESC_TYPE_DEVICE
self.bcdUSB = 0x0200
self.idVendor = ID_VENDOR
self.idProduct = ID_PRODUCT
self.bcdDevice = 0x0001
self.iManufacturer = 0
self.iProduct = 0
self.iSerialNumber = 0
self.bNumConfigurations = len(self.configurations)
self.bMaxPacketSize0 = 64
self.bDeviceClass = 0xff
self.bDeviceSubClass = 0xff
self.bDeviceProtocol = 0xff
class FindDescriptorTest(unittest.TestCase):
@methodtrace(utils.logger)
def runTest(self):
d = usb.core.find(idVendor=ID_VENDOR)
if d is None:
return
self.assertEqual(find_descriptor(d, bConfigurationValue=10), None)
self.assertNotEqual(find_descriptor(d, bConfigurationValue=1), None)
self.assertEqual(len(list(find_descriptor(d, find_all=True, bConfigurationValue=10))), 0)
self.assertEqual(len(list(find_descriptor(d, find_all=True, bConfigurationValue=1))), 1)
self.assertEqual(len(list(find_descriptor(d, find_all=True))), d.bNumConfigurations)
self.assertEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 10), None)
self.assertNotEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 1), None)
self.assertEqual(len(list(find_descriptor(d, find_all=True, custom_match = lambda c: c.bConfigurationValue == 10))), 0)
self.assertEqual(len(list(find_descriptor(d, find_all=True, custom_match = lambda c: c.bConfigurationValue == 1))), 1)
self.assertEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 10, bLength=9), None)
self.assertNotEqual(find_descriptor(d, custom_match = lambda c: c.bConfigurationValue == 1, bLength=9), None)
cfg = find_descriptor(d)
self.assertTrue(isinstance(cfg, usb.core.Configuration))
intf = find_descriptor(cfg)
self.assertTrue(isinstance(intf, usb.core.Interface))
class UtilTest(unittest.TestCase):
@methodtrace(utils.logger)
def test_endpoint_address(self):
self.assertEqual(endpoint_address(0x01), 0x01)
self.assertEqual(endpoint_address(0x81), 0x01)
@methodtrace(utils.logger)
def test_endpoint_direction(self):
self.assertEqual(endpoint_direction(0x01), ENDPOINT_OUT)
self.assertEqual(endpoint_direction(0x81), ENDPOINT_IN)
@methodtrace(utils.logger)
def test_endpoint_type(self):
self.assertEqual(endpoint_type(ENDPOINT_TYPE_CTRL), ENDPOINT_TYPE_CTRL)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_ISO), ENDPOINT_TYPE_ISO)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_INTR), ENDPOINT_TYPE_INTR)
self.assertEqual(endpoint_type(ENDPOINT_TYPE_BULK), ENDPOINT_TYPE_BULK)
@methodtrace(utils.logger)
def test_ctrl_direction(self):
self.assertEqual(ctrl_direction(CTRL_OUT), CTRL_OUT)
self.assertEqual(ctrl_direction(CTRL_IN), CTRL_IN)
@methodtrace(utils.logger)
def test_build_request_type(self):
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE), 0x00)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE), 0x01)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_ENDPOINT), 0x02)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_OTHER), 0x03)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_DEVICE), 0x20)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE), 0x21)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_ENDPOINT), 0x22)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_OTHER), 0x23)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE), 0x40)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_INTERFACE), 0x41)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_ENDPOINT), 0x42)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_OTHER), 0x43)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_DEVICE), 0x60)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_INTERFACE), 0x61)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_ENDPOINT), 0x62)
self.assertEqual(build_request_type(CTRL_OUT, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_OTHER), 0x63)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE), 0x80)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE), 0x81)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_ENDPOINT), 0x82)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_OTHER), 0x83)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_DEVICE), 0xa0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE), 0xa1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_ENDPOINT), 0xa2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_OTHER), 0xa3)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE), 0xc0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_INTERFACE), 0xc1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_ENDPOINT), 0xc2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_OTHER), 0xc3)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_DEVICE), 0xe0)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_INTERFACE), 0xe1)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_ENDPOINT), 0xe2)
self.assertEqual(build_request_type(CTRL_IN, CTRL_TYPE_RESERVED, CTRL_RECIPIENT_OTHER), 0xe3)
def get_suite():
suite = unittest.TestSuite()
suite.addTest(FindDescriptorTest())
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UtilTest))
return suite
if __name__ == '__main__':
utils.run_tests(get_suite())
|
bsd-3-clause
|
kouk/boto
|
boto/glacier/job.py
|
153
|
7999
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import math
import socket
from boto.glacier.exceptions import TreeHashDoesNotMatchError, \
DownloadArchiveError
from boto.glacier.utils import tree_hash_from_str
class Job(object):
DefaultPartSize = 4 * 1024 * 1024
ResponseDataElements = (('Action', 'action', None),
('ArchiveId', 'archive_id', None),
('ArchiveSizeInBytes', 'archive_size', 0),
('Completed', 'completed', False),
('CompletionDate', 'completion_date', None),
('CreationDate', 'creation_date', None),
('InventorySizeInBytes', 'inventory_size', 0),
('JobDescription', 'description', None),
('JobId', 'id', None),
('SHA256TreeHash', 'sha256_treehash', None),
('SNSTopic', 'sns_topic', None),
('StatusCode', 'status_code', None),
('StatusMessage', 'status_message', None),
('VaultARN', 'arn', None))
def __init__(self, vault, response_data=None):
self.vault = vault
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, response_data[response_name])
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
def __repr__(self):
return 'Job(%s)' % self.arn
def get_output(self, byte_range=None, validate_checksum=False):
"""
This operation downloads the output of the job. Depending on
the job type you specified when you initiated the job, the
output will be either the content of an archive or a vault
inventory.
You can download all the job output or download a portion of
the output by specifying a byte range. In the case of an
archive retrieval job, depending on the byte range you
specify, Amazon Glacier returns the checksum for the portion
of the data. You can compute the checksum on the client and
verify that the values match to ensure the portion you
downloaded is the correct data.
        :type byte_range: tuple
        :param byte_range: A tuple of integers specifying the slice (in bytes)
            of the archive you want to receive
        :type validate_checksum: bool
        :param validate_checksum: Specify whether or not to validate
            the associated tree hash. If the response does not contain
            a TreeHash, then no checksum will be verified.
"""
response = self.vault.layer1.get_job_output(self.vault.name,
self.id,
byte_range)
if validate_checksum and 'TreeHash' in response:
data = response.read()
actual_tree_hash = tree_hash_from_str(data)
if response['TreeHash'] != actual_tree_hash:
raise TreeHashDoesNotMatchError(
"The calculated tree hash %s does not match the "
"expected tree hash %s for the byte range %s" % (
actual_tree_hash, response['TreeHash'], byte_range))
return response
def _calc_num_chunks(self, chunk_size):
return int(math.ceil(self.archive_size / float(chunk_size)))
def download_to_file(self, filename, chunk_size=DefaultPartSize,
verify_hashes=True, retry_exceptions=(socket.error,)):
"""Download an archive to a file by name.
:type filename: str
:param filename: The name of the file where the archive
contents will be saved.
:type chunk_size: int
:param chunk_size: The chunk size to use when downloading
the archive.
:type verify_hashes: bool
:param verify_hashes: Indicates whether or not to verify
the tree hashes for each downloaded chunk.
"""
num_chunks = self._calc_num_chunks(chunk_size)
with open(filename, 'wb') as output_file:
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
verify_hashes=True,
retry_exceptions=(socket.error,)):
"""Download an archive to a file object.
:type output_file: file
:param output_file: The file object where the archive
contents will be saved.
:type chunk_size: int
:param chunk_size: The chunk size to use when downloading
the archive.
:type verify_hashes: bool
:param verify_hashes: Indicates whether or not to verify
the tree hashes for each downloaded chunk.
"""
num_chunks = self._calc_num_chunks(chunk_size)
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
retry_exceptions):
for i in range(num_chunks):
byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
data, expected_tree_hash = self._download_byte_range(
byte_range, retry_exceptions)
if verify_hashes:
actual_tree_hash = tree_hash_from_str(data)
if expected_tree_hash != actual_tree_hash:
raise TreeHashDoesNotMatchError(
"The calculated tree hash %s does not match the "
"expected tree hash %s for the byte range %s" % (
actual_tree_hash, expected_tree_hash, byte_range))
fileobj.write(data)
def _download_byte_range(self, byte_range, retry_exceptions):
# You can occasionally get socket.errors when downloading
# chunks from Glacier, so each chunk can be retried up
# to 5 times.
for _ in range(5):
try:
response = self.get_output(byte_range)
data = response.read()
expected_tree_hash = response['TreeHash']
return data, expected_tree_hash
            except retry_exceptions as e:
                last_exception = e
                continue
        else:
            raise DownloadArchiveError("There was an error downloading "
                                       "byte range %s: %s" % (byte_range,
                                                              last_exception))
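def _download_example(vault):
    """Usage sketch only, not part of boto: ``vault`` is assumed to be a
    ``boto.glacier.vault.Vault`` and the job id and file name below are
    placeholders for a completed archive-retrieval job."""
    job = vault.get_job('EXAMPLE-JOB-ID')
    if job.completed:
        # Download in 4 MB chunks, verifying the tree hash of each chunk.
        job.download_to_file('archive.tar', chunk_size=4 * 1024 * 1024)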
|
mit
|
marc-sensenich/ansible
|
lib/ansible/modules/monitoring/sensu_check.py
|
46
|
12707
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_check
short_description: Manage Sensu checks
version_added: 2.0
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
- All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
- they are simply specified for your convenience.
options:
name:
description:
- The name of the check
- This is the key that is used to determine whether a check exists
required: true
state:
description:
- Whether the check should be present or not
choices: [ 'present', 'absent' ]
default: present
path:
description:
- Path to the json file of the check to be added/removed.
- Will be created if it does not exist (unless I(state=absent)).
- The parent folders need to exist when I(state=present), otherwise an error will be thrown
default: /etc/sensu/conf.d/checks.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so
- you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
command:
description:
- Path to the sensu check to run (not required when I(state=absent))
required: true
handlers:
description:
- List of handlers to notify when the check fails
default: []
subscribers:
description:
- List of subscribers/channels this check should run for
- See sensu_subscribers to subscribe a machine to a channel
default: []
interval:
description:
- Check interval in seconds
timeout:
description:
- Timeout for the check
default: 10
ttl:
description:
- Time to live in seconds until the check is considered stale
version_added: 2.4
handle:
description:
- Whether the check should be handled or not
type: bool
default: 'yes'
subdue_begin:
description:
- When to disable handling of check failures
subdue_end:
description:
- When to enable handling of check failures
dependencies:
description:
- Other checks this check depends on, if dependencies fail,
- handling of this check will be disabled
default: []
metric:
description:
- Whether the check is a metric
type: bool
default: 'no'
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- This option obviates the need for specifying the I(subscribers) option
type: bool
default: 'no'
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
type: bool
default: 'yes'
occurrences:
description:
- Number of event occurrences before the handler should take action
default: 1
refresh:
description:
- Number of seconds handlers should wait before taking second action
aggregate:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
type: bool
default: 'no'
low_flap_threshold:
description:
- The low threshold for flap detection
high_flap_threshold:
description:
- The high threshold for flap detection
custom:
version_added: "2.1"
description:
- A hash/dictionary of custom parameters for mixing to the configuration.
    - You can't overwrite other module parameters using this
default: {}
source:
version_added: "2.1"
description:
- The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
author: "Anders Ingemann (@andsens)"
'''
EXAMPLES = '''
# Fetch metrics about the CPU load every 60 seconds,
# the sensu server has a handler called 'relay' which forwards stats to graphite
- name: get cpu metrics
sensu_check:
name: cpu_load
command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
metric: yes
handlers: relay
subscribers: common
interval: 60
# Check whether nginx is running
- name: check nginx process
sensu_check:
name: nginx_running
command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
handlers: default
subscribers: nginx
interval: 60
# Stop monitoring the disk capacity.
# Note that the check will still show up in the sensu dashboard,
# to remove it completely you need to issue a DELETE request to the sensu api.
- name: check disk
sensu_check:
name: check_disk_capacity
state: absent
'''
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def sensu_check(module, path, name, state='present', backup=False):
changed = False
reasons = []
stream = None
try:
try:
stream = open(path, 'r')
config = json.load(stream)
except IOError as e:
if e.errno == 2: # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
finally:
if stream:
stream.close()
if 'checks' not in config:
if state == 'absent':
reasons.append('`checks\' section did not exist and state is `absent\'')
return changed, reasons
config['checks'] = {}
changed = True
reasons.append('`checks\' section did not exist')
if state == 'absent':
if name in config['checks']:
del config['checks'][name]
changed = True
reasons.append('check was present and state is `absent\'')
if state == 'present':
if name not in config['checks']:
check = {}
config['checks'][name] = check
changed = True
reasons.append('check was absent and state is `present\'')
else:
check = config['checks'][name]
simple_opts = ['command',
'handlers',
'subscribers',
'interval',
'timeout',
'ttl',
'handle',
'dependencies',
'standalone',
'publish',
'occurrences',
'refresh',
'aggregate',
'low_flap_threshold',
'high_flap_threshold',
'source',
]
for opt in simple_opts:
if module.params[opt] is not None:
if opt not in check or check[opt] != module.params[opt]:
check[opt] = module.params[opt]
changed = True
reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
else:
if opt in check:
del check[opt]
changed = True
reasons.append('`{opt}\' was removed'.format(opt=opt))
if module.params['custom']:
            # Merge custom parameters, refusing to overwrite managed options
            custom_params = module.params['custom']
            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
            if overwritten_fields:
                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
                module.fail_json(msg=msg)
for k, v in custom_params.items():
if k in config['checks'][name]:
if not config['checks'][name][k] == v:
changed = True
reasons.append('`custom param {opt}\' was changed'.format(opt=k))
else:
changed = True
reasons.append('`custom param {opt}\' was added'.format(opt=k))
check[k] = v
simple_opts += custom_params.keys()
# Remove obsolete custom params
for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
changed = True
reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
del check[opt]
if module.params['metric']:
if 'type' not in check or check['type'] != 'metric':
check['type'] = 'metric'
changed = True
reasons.append('`type\' was not defined or not `metric\'')
if not module.params['metric'] and 'type' in check:
del check['type']
changed = True
reasons.append('`type\' was defined')
if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
subdue = {'begin': module.params['subdue_begin'],
'end': module.params['subdue_end'],
}
if 'subdue' not in check or check['subdue'] != subdue:
check['subdue'] = subdue
changed = True
reasons.append('`subdue\' did not exist or was different')
else:
if 'subdue' in check:
del check['subdue']
changed = True
reasons.append('`subdue\' was removed')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
try:
stream = open(path, 'w')
stream.write(json.dumps(config, indent=2) + '\n')
except IOError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
finally:
if stream:
stream.close()
return changed, reasons
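# For reference, a check managed by the function above is stored in the JSON
# file roughly as follows (values mirror the first entry in EXAMPLES; the
# exact layout depends on the options you pass):
#
#   {"checks": {"cpu_load": {"command": "/etc/sensu/plugins/system/cpu-mpstat-metrics.rb",
#                            "handlers": ["relay"], "subscribers": ["common"],
#                            "interval": 60, "type": "metric"}}}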
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
'command': {'type': 'str'},
'handlers': {'type': 'list'},
'subscribers': {'type': 'list'},
'interval': {'type': 'int'},
'timeout': {'type': 'int'},
'ttl': {'type': 'int'},
'handle': {'type': 'bool'},
'subdue_begin': {'type': 'str'},
'subdue_end': {'type': 'str'},
'dependencies': {'type': 'list'},
'metric': {'type': 'bool', 'default': 'no'},
'standalone': {'type': 'bool'},
'publish': {'type': 'bool'},
'occurrences': {'type': 'int'},
'refresh': {'type': 'int'},
'aggregate': {'type': 'bool'},
'low_flap_threshold': {'type': 'int'},
'high_flap_threshold': {'type': 'int'},
'custom': {'type': 'dict'},
'source': {'type': 'str'},
}
required_together = [['subdue_begin', 'subdue_end']]
module = AnsibleModule(argument_spec=arg_spec,
required_together=required_together,
supports_check_mode=True)
if module.params['state'] != 'absent' and module.params['command'] is None:
module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_check(module, path, name, state, backup)
module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
if __name__ == '__main__':
main()
|
gpl-3.0
|
dogecoin/dogecoin
|
src/test/bitcoin-util-test.py
|
51
|
1534
|
#!/usr/bin/env python
# Copyright 2014 BitPay Inc.
# Copyright 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import os
import bctest
import buildenv
import argparse
import logging
help_text="""Test framework for bitcoin utils.
Runs automatically during `make check`.
Can also be run manually from the src directory by specifying the source directory:
test/bitcoin-util-test.py --srcdir='srcdir' [--verbose]
"""
if __name__ == '__main__':
# Try to get the source directory from the environment variables. This will
# be set for `make check` automated runs. If environment variable is not set,
# then get the source directory from command line args.
try:
srcdir = os.environ["srcdir"]
verbose = False
except:
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--srcdir')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
srcdir = args.srcdir
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format = formatter, level=level)
bctest.bctester(srcdir + "/test/data", "bitcoin-util-test.json", buildenv)
|
mit
|
danielfrg/s3contents
|
s3contents/genericfs.py
|
1
|
1876
|
"""
Generic FileSystem class to be used by the Content Manager
"""
from s3contents.ipycompat import HasTraits
class GenericFS(HasTraits):
def ls(self, path=""):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def isfile(self, path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def isdir(self, path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def mv(self, old_path, new_path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def cp(self, old_path, new_path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def rm(self, path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def mkdir(self, path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def read(self, path, format):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def lstat(self, path):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
def write(self, path, content, format):
raise NotImplementedError(
"Should be implemented by the file system abstraction"
)
class GenericFSError(Exception):
pass
class NoSuchFile(GenericFSError):
def __init__(self, path, *args, **kwargs):
self.path = path
self.message = "No such file or directory: {}".format(path)
super(NoSuchFile, self).__init__(self.message, *args, **kwargs)
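# A minimal sketch of how a concrete backend is expected to plug into the
# abstraction above; this in-memory class is illustrative only and is not
# part of s3contents.
class _InMemoryFS(GenericFS):
    _store = {}

    def isfile(self, path):
        return path in self._store

    def write(self, path, content, format):
        self._store[path] = (content, format)

    def read(self, path, format):
        if path not in self._store:
            raise NoSuchFile(path)
        content, stored_format = self._store[path]
        return content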
|
apache-2.0
|
DevinDewitt/pyqt5
|
pyuic/uic/widget-plugins/qtdeclarative.py
|
2
|
1565
|
#############################################################################
##
## Copyright (c) 2014 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
# If pluginType is MODULE, the plugin loader will call moduleInformation. The
# variable MODULE is inserted into the local namespace by the plugin loader.
pluginType = MODULE
# moduleInformation() must return a tuple (module, widget_list). If "module"
# is "A" and any widget from this module is used, the code generator will write
# "import A". If "module" is "A[.B].C", the code generator will write
# "from A[.B] import C". Each entry in "widget_list" must be unique.
def moduleInformation():
return "PyQt5.QtDeclarative", ("QDeclarativeView", )
|
gpl-3.0
|
halfak/mwxml
|
mwxml/utilities/inflate.py
|
2
|
1818
|
r"""
``$ mwxml inflate -h``
::
Converts a stream of flat RevisionDocument JSON blobs into hierarchical JSON
RevisionDocument JSON blobs.
Usage:
inflate (-h|--help)
inflate [<input-file>...] [--threads=<num>] [--output=<path>]
[--compress=<type>] [--verbose] [--debug]
Options:
-h|--help Print this documentation
<input-file> The path to file containing a sequence of flat JSON
revision documents [default: <stdin>]
--threads=<num> If a collection of files are provided, how many
processor threads? [default: <cpu_count>]
--output=<path> Write output to a directory with one output file
per input path. [default: <stdout>]
--compress=<type> If set, output written to the output-dir will be
compressed in this format. [default: bz2]
--verbose Print progress information to stderr. Kind of a
mess when running multi-threaded.
--debug Print debug logs.
"""
import sys
import mwcli
def _single_inflate(flat_json):
inflated = {}
flat_keys = flat_json.keys()
for key in flat_keys:
bottom_dict = inflated
parts = key.split('_')
for sub_key in parts[:-1]:
if sub_key not in bottom_dict:
bottom_dict[sub_key] = {}
bottom_dict = bottom_dict[sub_key]
bottom_dict[parts[-1]] = flat_json[key]
return inflated
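# Illustrative example (not part of the original module): keys are split on
# underscores and nested one level per segment, so on a hypothetical flat
# revision document
#
#     _single_inflate({"id": 1, "page_id": 7, "page_title": "Foo"})
#
# returns {"id": 1, "page": {"id": 7, "title": "Foo"}}; keys without an
# underscore stay at the top level.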
def inflate(flat_jsons, verbose=False):
for flat_json in flat_jsons:
inflated = _single_inflate(flat_json)
yield inflated
streamer = mwcli.Streamer(
__doc__,
__name__,
inflate
)
main = streamer.main
|
mit
|
saleeh93/buck-cutom
|
third-party/py/twitter-commons/src/python/twitter/common/python/package.py
|
23
|
9291
|
import contextlib
import os
import tarfile
import zipfile
from .base import maybe_requirement
from .common import safe_mkdtemp
from .http.link import Link
from .interpreter import PythonInterpreter
from .pep425 import PEP425, PEP425Extras
from .platforms import Platform
from pkg_resources import (
EGG_NAME,
parse_version,
safe_name,
)
class Package(Link):
"""Base class for named Python binary packages (e.g. source, egg, wheel)."""
# The registry of concrete implementations
_REGISTRY = set()
@classmethod
def register(cls, package_type):
"""Register a concrete implementation of a Package to be recognized by twitter.common.python."""
if not issubclass(package_type, cls):
raise TypeError('package_type must be a subclass of Package.')
cls._REGISTRY.add(package_type)
@classmethod
def from_href(cls, href, **kw):
"""Convert from a url to Package.
:param href: The url to parse
:type href: string
:returns: A Package object if a valid concrete implementation exists, otherwise None.
"""
for package_type in cls._REGISTRY:
try:
return package_type(href, **kw)
except package_type.InvalidLink:
continue
@property
def name(self):
        raise NotImplementedError
@property
def raw_version(self):
        raise NotImplementedError
@property
def version(self):
return parse_version(self.raw_version)
def satisfies(self, requirement):
"""Determine whether this package matches the requirement.
:param requirement: The requirement to compare this Package against
:type requirement: string or :class:`pkg_resources.Requirement`
:returns: True if the package matches the requirement, otherwise False
"""
requirement = maybe_requirement(requirement)
link_name = safe_name(self.name).lower()
if link_name != requirement.key:
return False
return self.raw_version in requirement
def compatible(self, identity, platform=Platform.current()):
"""Is this link compatible with the given :class:`PythonIdentity` identity and platform?
:param identity: The Python identity (e.g. CPython 2.7.5) against which compatibility
should be checked.
:type identity: :class:`PythonIdentity`
:param platform: The platform against which compatibility should be checked. If None, do not
check platform compatibility.
:type platform: string or None
"""
raise NotImplementedError
class SourcePackage(Package):
"""A Package representing an uncompiled/unbuilt source distribution."""
EXTENSIONS = {
'.tar': (tarfile.TarFile.open, tarfile.ReadError),
'.tar.gz': (tarfile.TarFile.open, tarfile.ReadError),
'.tar.bz2': (tarfile.TarFile.open, tarfile.ReadError),
'.tgz': (tarfile.TarFile.open, tarfile.ReadError),
'.zip': (zipfile.ZipFile, zipfile.BadZipfile)
}
@classmethod
def split_fragment(cls, fragment):
"""A heuristic used to split a string into version name/fragment:
>>> split_fragment('pysolr-2.1.0-beta')
('pysolr', '2.1.0-beta')
>>> split_fragment('cElementTree-1.0.5-20051216')
('cElementTree', '1.0.5-20051216')
>>> split_fragment('pil-1.1.7b1-20090412')
('pil', '1.1.7b1-20090412')
>>> split_fragment('django-plugin-2-2.3')
('django-plugin-2', '2.3')
"""
def likely_version_component(enumerated_fragment):
return sum(bool(v and v[0].isdigit()) for v in enumerated_fragment[1].split('.'))
fragments = fragment.split('-')
if len(fragments) == 1:
return fragment, ''
max_index, _ = max(enumerate(fragments), key=likely_version_component)
return '-'.join(fragments[0:max_index]), '-'.join(fragments[max_index:])
def __init__(self, url, **kw):
super(SourcePackage, self).__init__(url, **kw)
for ext, class_info in self.EXTENSIONS.items():
if self.filename.endswith(ext):
self._archive_class = class_info
fragment = self.filename[:-len(ext)]
break
else:
raise self.InvalidLink('%s does not end with any of: %s' % (
self.filename, ' '.join(self.EXTENSIONS)))
self._name, self._raw_version = self.split_fragment(fragment)
@property
def name(self):
return safe_name(self._name)
@property
def raw_version(self):
return safe_name(self._raw_version)
@classmethod
def first_nontrivial_dir(cls, path):
files = os.listdir(path)
if len(files) == 1 and os.path.isdir(os.path.join(path, files[0])):
return cls.first_nontrivial_dir(os.path.join(path, files[0]))
else:
return path
def _unpack(self, filename, location=None):
path = location or safe_mkdtemp()
archive_class, error_class = self._archive_class
try:
with contextlib.closing(archive_class(filename)) as package:
package.extractall(path=path)
except error_class:
raise self.UnreadableLink('Could not read %s' % self.url)
return self.first_nontrivial_dir(path)
def fetch(self, location=None, conn_timeout=None):
"""Fetch and unpack this source target into the location.
:param location: The location into which the archive should be unpacked. If None, a temporary
ephemeral directory will be created.
:type location: string or None
:param conn_timeout: A connection timeout for the fetch. If None, a default is used.
:type conn_timeout: float or None
:returns: The assumed root directory of the package.
"""
target = super(SourcePackage, self).fetch(conn_timeout=conn_timeout)
return self._unpack(target, location)
# SourcePackages are always compatible as they can be translated to a distribution.
def compatible(self, identity, platform=Platform.current()):
return True
class EggPackage(Package):
"""A Package representing a built egg."""
def __init__(self, url, **kw):
super(EggPackage, self).__init__(url, **kw)
filename, ext = os.path.splitext(self.filename)
if ext.lower() != '.egg':
raise self.InvalidLink('Not an egg: %s' % filename)
matcher = EGG_NAME(filename)
if not matcher:
raise self.InvalidLink('Could not match egg: %s' % filename)
self._name, self._raw_version, self._py_version, self._platform = matcher.group(
'name', 'ver', 'pyver', 'plat')
if self._raw_version is None or self._py_version is None:
raise self.InvalidLink('url with .egg extension but bad name: %s' % url)
def __hash__(self):
return hash((self.name, self.version, self.py_version, self.platform))
@property
def name(self):
return safe_name(self._name)
@property
def raw_version(self):
return safe_name(self._raw_version)
@property
def py_version(self):
return self._py_version
@property
def platform(self):
return self._platform
def compatible(self, identity, platform=Platform.current()):
if not Platform.version_compatible(self.py_version, identity.python):
return False
if not Platform.compatible(self.platform, platform):
return False
return True
class WheelPackage(Package):
"""A Package representing a built wheel."""
def __init__(self, url, **kw):
super(WheelPackage, self).__init__(url, **kw)
filename, ext = os.path.splitext(self.filename)
if ext.lower() != '.whl':
raise self.InvalidLink('Not a wheel: %s' % filename)
try:
self._name, self._raw_version, self._py_tag, self._abi_tag, self._arch_tag = (
filename.split('-'))
except ValueError:
raise self.InvalidLink('Wheel filename malformed.')
# See https://github.com/pypa/pip/issues/1150 for why this is unavoidable.
        self._name = self._name.replace('_', '-')
        self._raw_version = self._raw_version.replace('_', '-')
self._supported_tags = frozenset(self._iter_tags())
@property
def name(self):
return self._name
@property
def raw_version(self):
return self._raw_version
def _iter_tags(self):
for py in self._py_tag.split('.'):
for abi in self._abi_tag.split('.'):
for arch in self._arch_tag.split('.'):
for real_arch in PEP425Extras.platform_iterator(arch):
yield (py, abi, real_arch)
def compatible(self, identity, platform=Platform.current()):
for tag in PEP425.iter_supported_tags(identity, platform):
if tag in self._supported_tags:
return True
return False
Package.register(SourcePackage)
Package.register(EggPackage)
Package.register(WheelPackage)
def distribution_compatible(dist, interpreter=None, platform=None):
"""Is this distribution compatible with the given interpreter/platform combination?
:param interpreter: The Python interpreter against which compatibility should be checked. If None
specified, the current interpreter is used.
:type identity: :class:`PythonInterpreter` or None
:param platform: The platform against which compatibility should be checked. If None, the current
platform will be used
:type platform: string or None
:returns: True if the distribution is compatible, False if it is unrecognized or incompatible.
"""
interpreter = interpreter or PythonInterpreter.get()
platform = platform or Platform.current()
package = Package.from_href(dist.location)
if not package:
return False
return package.compatible(interpreter.identity, platform=platform)
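# Illustrative usage sketch (not part of the original module; the URLs are
# hypothetical). Package.from_href() tries each registered package type until
# one accepts the link:
#
#     pkg = Package.from_href('https://example.com/foo-1.0.tar.gz')
#     isinstance(pkg, SourcePackage)   # True -- matched by archive extension
#     pkg.name, pkg.raw_version        # ('foo', '1.0')
#
#     whl = Package.from_href('https://example.com/foo-1.0-py2.py3-none-any.whl')
#     isinstance(whl, WheelPackage)    # True -- five dash-separated wheel fields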
|
apache-2.0
|
gao-wei/qualitybots
|
src/appengine/common/useragent_parser_test.py
|
26
|
8267
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for UAParser(useragent_parser.py) module."""
import unittest
import useragent_parser
class UAParserTest(unittest.TestCase):
TESTDATA_CHROME = [
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) '
'AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13',
'browser_family': 'chrome', 'browser_version': '0.2.149.27',
'os_family': 'windows', 'os_version': 'win_2000',
'le_family': 'applewebkit', 'le_version': '525.13'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) '
'AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.2 Safari/530.5',
'browser_family': 'chrome', 'browser_version': '2.0.172.2',
'os_family': 'windows', 'os_version': 'win_xp',
'le_family': 'applewebkit', 'le_version': '530.5'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) '
'AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.43 Safari/530.5',
'browser_family': 'chrome', 'browser_version': '2.0.172.43',
'os_family': 'windows', 'os_version': 'win_xp',
'le_family': 'applewebkit', 'le_version': '530.5'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) '
'AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6',
'browser_family': 'chrome', 'browser_version': '2.0.174.0',
'os_family': 'windows', 'os_version': 'win_vista',
'le_family': 'applewebkit', 'le_version': '530.6'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) '
'AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'browser_family': 'chrome', 'browser_version': '10.0.601.0',
'os_family': 'windows', 'os_version': 'win_7',
'le_family': 'applewebkit', 'le_version': '534.14'},
{'user_agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.30 '
'(KHTML, like Gecko) Chrome/12.0.742.53 Safari/534.30',
'browser_family': 'chrome', 'browser_version': '12.0.742.53',
'os_family': 'windows', 'os_version': 'win_7',
'le_family': 'applewebkit', 'le_version': '534.30'},
{'user_agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.36 '
'(KHTML, like Gecko) Chrome/13.0.766.0 Safari/534.36',
'browser_family': 'chrome', 'browser_version': '13.0.766.0',
'os_family': 'linux', 'os_version': 'unknown',
'le_family': 'applewebkit', 'le_version': '534.36'},
{'user_agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.35 (KHTML,'
' like Gecko) Ubuntu/10.10 Chromium/13.0.764.0 Chrome/13.0.764.0 '
'Safari/534.35',
'browser_family': 'chrome', 'browser_version': '13.0.764.0',
'os_family': 'linux', 'os_version': 'unknown',
'le_family': 'applewebkit', 'le_version': '534.35'},
{'user_agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-US)'
' AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 '
'Safari/534.16',
'browser_family': 'chrome', 'browser_version': '10.0.648.133',
'os_family': 'macintosh', 'os_version': '10_6_2',
'le_family': 'applewebkit', 'le_version': '534.16'},
{'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) '
'AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.698.0 Safari/534.24',
'browser_family': 'chrome', 'browser_version': '11.0.698.0',
'os_family': 'macintosh', 'os_version': '10_6_6',
'le_family': 'applewebkit', 'le_version': '534.24'},
{'user_agent': 'Mozilla/5.0 (X11; U; CrOS i686 0.9.130; en-US) '
'AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.344 '
'Safari/534.10',
'browser_family': 'chrome', 'browser_version': '8.0.552.344',
'os_family': 'cros', 'os_version': '0.9.130',
'le_family': 'applewebkit', 'le_version': '534.10'}
]
TESTDATA_FIREFOX = [
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU; rv:1.7.7)'
' Gecko/20050414 Firefox/1.0.3',
'browser_family': 'firefox', 'browser_version': '1.0.3',
'os_family': 'windows', 'os_version': 'win_xp',
'le_family': 'gecko', 'le_version': 'rv:1.7.7'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-GB; rv:1.7.6)'
' Gecko/20050321 Firefox/1.0.2',
'browser_family': 'firefox', 'browser_version': '1.0.2',
'os_family': 'windows', 'os_version': 'win_2000',
'le_family': 'gecko', 'le_version': 'rv:1.7.6'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; de; rv:1.9.1.11)'
' Gecko/20100701 Firefox/3.5.11 ( .NET CLR 3.5.30729; .NET4.0C)',
'browser_family': 'firefox', 'browser_version': '3.5.11',
'os_family': 'windows', 'os_version': 'win_7',
'le_family': 'gecko', 'le_version': 'rv:1.9.1.11'},
{'user_agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; ja; rv:1.9.2.4)'
' Gecko/20100513 Firefox/3.6.4 ( .NET CLR 3.5.30729)',
'browser_family': 'firefox', 'browser_version': '3.6.4',
'os_family': 'windows', 'os_version': 'win_vista',
'le_family': 'gecko', 'le_version': 'rv:1.9.2.4'},
{'user_agent': 'Mozilla/5.0 (X11; U; Linux x86_64; fr; rv:1.9.0.19) '
'Gecko/2010051407 CentOS/3.0.19-1.el5.centos Firefox/3.0.19',
'browser_family': 'firefox', 'browser_version': '3.0.19',
'os_family': 'linux', 'os_version': 'unknown',
'le_family': 'gecko', 'le_version': 'rv:1.9.0.19'},
{'user_agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; de; '
'rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12 GTB5',
'browser_family': 'firefox', 'browser_version': '3.6.12',
'os_family': 'macintosh', 'os_version': '10.6',
'le_family': 'gecko', 'le_version': 'rv:1.9.2.12'},
{'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) '
'Gecko/20100101 Firefox/4.0b8',
'browser_family': 'firefox', 'browser_version': '4.0b8',
'os_family': 'macintosh', 'os_version': '10.6',
'le_family': 'gecko', 'le_version': 'rv:2.0b8'},
{'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b11pre'
') Gecko/20110126 Firefox/4.0b11pre',
'browser_family': 'firefox', 'browser_version': '4.0b11pre',
'os_family': 'macintosh', 'os_version': '10.6',
'le_family': 'gecko', 'le_version': 'rv:2.0b11pre'}
]
def testUAParser_Chrome(self):
for testdata in self.TESTDATA_CHROME:
parser = useragent_parser.UAParser(testdata['user_agent'])
self.assertEqual(parser.GetBrowserFamily(), testdata['browser_family'])
self.assertEqual(parser.GetBrowserVersion(), testdata['browser_version'])
self.assertEqual(parser.GetOSFamily(), testdata['os_family'])
self.assertEqual(parser.GetOSVersion(), testdata['os_version'])
self.assertEqual(parser.GetLayoutEngineFamily(), testdata['le_family'])
self.assertEqual(parser.GetLayoutEngineVersion(), testdata['le_version'])
def testUAParser_Firefox(self):
for testdata in self.TESTDATA_FIREFOX:
parser = useragent_parser.UAParser(testdata['user_agent'])
self.assertEqual(parser.GetBrowserFamily(), testdata['browser_family'])
self.assertEqual(parser.GetBrowserVersion(), testdata['browser_version'])
self.assertEqual(parser.GetOSFamily(), testdata['os_family'])
self.assertEqual(parser.GetOSVersion(), testdata['os_version'])
self.assertEqual(parser.GetLayoutEngineFamily(), testdata['le_family'])
self.assertEqual(parser.GetLayoutEngineVersion(), testdata['le_version'])
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
terranodo/geonode
|
geonode/base/management/commands/fixsitename.py
|
22
|
1462
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from urlparse import urlsplit
class Command(BaseCommand):
"""Overrides the default Site object with information from
SITENAME and SITEURL
"""
can_import_settings = True
def handle(self, *args, **options):
from django.conf import settings
name = getattr(settings, 'SITENAME', 'GeoNode')
url = getattr(settings, 'SITEURL')
parsed = urlsplit(url)
site = Site.objects.get_current()
site.name = name
site.domain = parsed.netloc
site.save()
|
gpl-3.0
|
graysonc/vim
|
autoload/conque_term/conque_sole_communicator.py
|
30
|
5055
|
# FILE: autoload/conque_term/conque_sole_communicator.py
# AUTHOR: Nico Raffo <[email protected]>
# WEBSITE: http://conque.googlecode.com
# MODIFIED: 2011-09-02
# VERSION: 2.3, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-2011 Nico Raffo
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
ConqueSoleCommunicator
This script will create a new Windows console and start the requested program
inside of it. This process is launched independently from the parent Vim
program, so it has no access to the vim module.
The main loop in this script reads data from the console and syncs it onto
blocks of memory shared with the Vim process. In this way the Vim process
and this script can communicate with each other.
"""
import time
import sys
from conque_globals import *
from conque_win32_util import *
from conque_sole_subprocess import *
from conque_sole_shared_memory import *
##############################################################
# only run if this file was run directly
if __name__ == '__main__':
    # attempt to catch ALL exceptions to fend off zombies
try:
# simple arg validation
if len(sys.argv) < 5:
exit()
        # maximum number of read loops. 0 means no limit. Only for testing.
max_loops = 0
# read interval, in seconds
sleep_time = 0.01
# idle read interval, in seconds
idle_sleep_time = 0.10
# are we idled?
is_idle = False
# mem key
mem_key = sys.argv[1]
# console width
console_width = int(sys.argv[2])
# console height
console_height = int(sys.argv[3])
# code page
code_page = int(sys.argv[4])
        # fast mode
fast_mode = int(sys.argv[5])
# the actual subprocess to run
cmd_line = " ".join(sys.argv[6:])
# width and height
options = {'LINES': console_height, 'COLUMNS': console_width, 'CODE_PAGE': code_page, 'FAST_MODE': fast_mode}
# set initial idle status
shm_command = ConqueSoleSharedMemory(CONQUE_SOLE_COMMANDS_SIZE, 'command', mem_key, serialize=True)
shm_command.create('write')
cmd = shm_command.read()
if cmd:
if cmd['cmd'] == 'idle':
is_idle = True
shm_command.clear()
##############################################################
# Create the subprocess
proc = ConqueSoleSubprocess()
res = proc.open(cmd_line, mem_key, options)
if not res:
exit()
##############################################################
# main loop!
loops = 0
while True:
# check for idle/resume
if is_idle or loops % 25 == 0:
# check process health
if not proc.is_alive():
proc.close()
break
# check for change in buffer focus
cmd = shm_command.read()
if cmd:
if cmd['cmd'] == 'idle':
is_idle = True
shm_command.clear()
elif cmd['cmd'] == 'resume':
is_idle = False
shm_command.clear()
# sleep between loops if moderation is requested
if sleep_time > 0:
if is_idle:
time.sleep(idle_sleep_time)
else:
time.sleep(sleep_time)
# write, read, etc
proc.write()
proc.read()
# increment loops, and exit if max has been reached
loops += 1
if max_loops and loops >= max_loops:
break
##############################################################
# all done!
proc.close()
# if an exception was thrown, croak
except:
proc.close()
# vim:foldmethod=marker
|
mit
|
larsbutler/swift
|
swift/common/middleware/ratelimit.py
|
42
|
13190
|
# Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from swift import gettext_ as _
import eventlet
from swift.common.utils import cache_from_env, get_logger, register_swift_info
from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.memcached import MemcacheConnectionError
from swift.common.swob import Request, Response
def interpret_conf_limits(conf, name_prefix, info=None):
"""
    Parses general params for rate limits, looking for things that
    start with the provided name_prefix within the provided conf
    and returns lists for both internal use and for /info
    :param conf: conf dict to parse
    :param name_prefix: prefix of config params to look for
:param info: set to return extra stuff for /info registration
"""
conf_limits = []
for conf_key in conf:
if conf_key.startswith(name_prefix):
cont_size = int(conf_key[len(name_prefix):])
rate = float(conf[conf_key])
conf_limits.append((cont_size, rate))
conf_limits.sort()
ratelimits = []
conf_limits_info = list(conf_limits)
while conf_limits:
cur_size, cur_rate = conf_limits.pop(0)
if conf_limits:
next_size, next_rate = conf_limits[0]
slope = (float(next_rate) - float(cur_rate)) \
/ (next_size - cur_size)
def new_scope(cur_size, slope, cur_rate):
# making new scope for variables
return lambda x: (x - cur_size) * slope + cur_rate
line_func = new_scope(cur_size, slope, cur_rate)
else:
line_func = lambda x: cur_rate
ratelimits.append((cur_size, cur_rate, line_func))
if info is None:
return ratelimits
else:
return ratelimits, conf_limits_info
def get_maxrate(ratelimits, size):
"""
Returns number of requests allowed per second for given size.
"""
last_func = None
if size:
size = int(size)
for ratesize, rate, func in ratelimits:
if size < ratesize:
break
last_func = func
if last_func:
return last_func(size)
return None
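# Illustrative sketch (not part of the original middleware): with a hypothetical
# proxy config containing
#
#     container_ratelimit_100 = 100
#     container_ratelimit_200 = 50
#
# interpret_conf_limits() builds breakpoints at container sizes 100 and 200 and
# get_maxrate() interpolates linearly between them:
#
#     limits = interpret_conf_limits(
#         {'container_ratelimit_100': 100, 'container_ratelimit_200': 50},
#         'container_ratelimit_')
#     get_maxrate(limits, 150)  # -> 75.0 requests per second
#     get_maxrate(limits, 50)   # -> None (below the first breakpoint)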
class MaxSleepTimeHitError(Exception):
pass
class RateLimitMiddleware(object):
"""
Rate limiting middleware
Rate limits requests on both an Account and Container level. Limits are
configurable.
"""
BLACK_LIST_SLEEP = 1
def __init__(self, app, conf, logger=None):
self.app = app
self.logger = logger or get_logger(conf, log_route='ratelimit')
self.memcache_client = None
self.account_ratelimit = float(conf.get('account_ratelimit', 0))
self.max_sleep_time_seconds = \
float(conf.get('max_sleep_time_seconds', 60))
self.log_sleep_time_seconds = \
float(conf.get('log_sleep_time_seconds', 0))
self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
self.ratelimit_whitelist = \
[acc.strip() for acc in
conf.get('account_whitelist', '').split(',') if acc.strip()]
self.ratelimit_blacklist = \
[acc.strip() for acc in
conf.get('account_blacklist', '').split(',') if acc.strip()]
self.container_ratelimits = interpret_conf_limits(
conf, 'container_ratelimit_')
self.container_listing_ratelimits = interpret_conf_limits(
conf, 'container_listing_ratelimit_')
def get_container_size(self, env):
rv = 0
container_info = get_container_info(
env, self.app, swift_source='RL')
if isinstance(container_info, dict):
rv = container_info.get(
'object_count', container_info.get('container_size', 0))
return rv
def get_ratelimitable_key_tuples(self, req, account_name,
container_name=None, obj_name=None,
global_ratelimit=None):
"""
Returns a list of key (used in memcache), ratelimit tuples. Keys
should be checked in order.
:param req: swob request
:param account_name: account name from path
:param container_name: container name from path
:param obj_name: object name from path
:param global_ratelimit: this account has an account wide
ratelimit on all writes combined
"""
keys = []
# COPYs are not limited
if self.account_ratelimit and \
account_name and container_name and not obj_name and \
req.method in ('PUT', 'DELETE'):
keys.append(("ratelimit/%s" % account_name,
self.account_ratelimit))
if account_name and container_name and obj_name and \
req.method in ('PUT', 'DELETE', 'POST', 'COPY'):
container_size = self.get_container_size(req.environ)
container_rate = get_maxrate(
self.container_ratelimits, container_size)
if container_rate:
keys.append((
"ratelimit/%s/%s" % (account_name, container_name),
container_rate))
if account_name and container_name and not obj_name and \
req.method == 'GET':
container_size = self.get_container_size(req.environ)
container_rate = get_maxrate(
self.container_listing_ratelimits, container_size)
if container_rate:
keys.append((
"ratelimit_listing/%s/%s" % (account_name, container_name),
container_rate))
if account_name and req.method in ('PUT', 'DELETE', 'POST', 'COPY'):
if global_ratelimit:
try:
global_ratelimit = float(global_ratelimit)
if global_ratelimit > 0:
keys.append((
"ratelimit/global-write/%s" % account_name,
global_ratelimit))
except ValueError:
pass
return keys
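    # Illustrative example (assumed request paths, not from the original
    # source): for a container PUT such as "PUT /v1/AUTH_test/images" with an
    # account_ratelimit configured, the returned tuples include
    # ("ratelimit/AUTH_test", account_ratelimit); for an object PUT like
    # "PUT /v1/AUTH_test/images/cat.jpg" they instead include
    # ("ratelimit/AUTH_test/images", the interpolated container rate) when a
    # container_ratelimit_* threshold applies, plus a
    # "ratelimit/global-write/AUTH_test" entry if the account carries a global
    # write ratelimit.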
def _get_sleep_time(self, key, max_rate):
'''
Returns the amount of time (a float in seconds) that the app
should sleep.
:param key: a memcache key
:param max_rate: maximum rate allowed in requests per second
:raises: MaxSleepTimeHitError if max sleep time is exceeded.
'''
try:
now_m = int(round(time.time() * self.clock_accuracy))
time_per_request_m = int(round(self.clock_accuracy / max_rate))
running_time_m = self.memcache_client.incr(
key, delta=time_per_request_m)
need_to_sleep_m = 0
if (now_m - running_time_m >
self.rate_buffer_seconds * self.clock_accuracy):
next_avail_time = int(now_m + time_per_request_m)
self.memcache_client.set(key, str(next_avail_time),
serialize=False)
else:
need_to_sleep_m = \
max(running_time_m - now_m - time_per_request_m, 0)
max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
# treat as no-op decrement time
self.memcache_client.decr(key, delta=time_per_request_m)
raise MaxSleepTimeHitError(
"Max Sleep Time Exceeded: %.2f" %
(float(need_to_sleep_m) / self.clock_accuracy))
return float(need_to_sleep_m) / self.clock_accuracy
except MemcacheConnectionError:
return 0
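    # Worked example (illustrative, using the default clock_accuracy of 1000):
    # with max_rate = 10 requests/sec, time_per_request_m is 100 "ticks". If
    # the memcache counter is already 300 ticks ahead of now_m, the caller must
    # sleep (300 - 100) / 1000 = 0.2 seconds; if instead the counter has fallen
    # more than rate_buffer_seconds behind now_m, it is reset to
    # now_m + time_per_request_m and no sleep is needed.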
def handle_ratelimit(self, req, account_name, container_name, obj_name):
'''
Performs rate limiting and account white/black listing. Sleeps
if necessary. If self.memcache_client is not set, immediately returns
None.
:param account_name: account name from path
:param container_name: container name from path
:param obj_name: object name from path
'''
if not self.memcache_client:
return None
try:
account_info = get_account_info(req.environ, self.app,
swift_source='RL')
account_global_ratelimit = \
account_info.get('sysmeta', {}).get('global-write-ratelimit')
except ValueError:
account_global_ratelimit = None
if account_name in self.ratelimit_whitelist or \
account_global_ratelimit == 'WHITELIST':
return None
if account_name in self.ratelimit_blacklist or \
account_global_ratelimit == 'BLACKLIST':
self.logger.error(_('Returning 497 because of blacklisting: %s'),
account_name)
eventlet.sleep(self.BLACK_LIST_SLEEP)
return Response(status='497 Blacklisted',
body='Your account has been blacklisted',
request=req)
for key, max_rate in self.get_ratelimitable_key_tuples(
req, account_name, container_name=container_name,
obj_name=obj_name, global_ratelimit=account_global_ratelimit):
try:
need_to_sleep = self._get_sleep_time(key, max_rate)
if self.log_sleep_time_seconds and \
need_to_sleep > self.log_sleep_time_seconds:
self.logger.warning(
_("Ratelimit sleep log: %(sleep)s for "
"%(account)s/%(container)s/%(object)s"),
{'sleep': need_to_sleep, 'account': account_name,
'container': container_name, 'object': obj_name})
if need_to_sleep > 0:
eventlet.sleep(need_to_sleep)
except MaxSleepTimeHitError as e:
self.logger.error(
_('Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s '
'. Ratelimit (Max Sleep) %(e)s'),
{'meth': req.method, 'acc': account_name,
'cont': container_name, 'obj': obj_name, 'e': str(e)})
error_resp = Response(status='498 Rate Limited',
body='Slow down', request=req)
return error_resp
return None
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
req = Request(env)
if self.memcache_client is None:
self.memcache_client = cache_from_env(env)
if not self.memcache_client:
self.logger.warning(
_('Warning: Cannot ratelimit without a memcached client'))
return self.app(env, start_response)
try:
version, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(env, start_response)
ratelimit_resp = self.handle_ratelimit(req, account, container, obj)
if ratelimit_resp is None:
return self.app(env, start_response)
else:
return ratelimit_resp(env, start_response)
def filter_factory(global_conf, **local_conf):
"""
paste.deploy app factory for creating WSGI proxy apps.
"""
conf = global_conf.copy()
conf.update(local_conf)
account_ratelimit = float(conf.get('account_ratelimit', 0))
max_sleep_time_seconds = \
float(conf.get('max_sleep_time_seconds', 60))
container_ratelimits, cont_limit_info = interpret_conf_limits(
conf, 'container_ratelimit_', info=1)
container_listing_ratelimits, cont_list_limit_info = \
interpret_conf_limits(conf, 'container_listing_ratelimit_', info=1)
# not all limits are exposed (intentionally)
register_swift_info('ratelimit',
account_ratelimit=account_ratelimit,
max_sleep_time_seconds=max_sleep_time_seconds,
container_ratelimits=cont_limit_info,
container_listing_ratelimits=cont_list_limit_info)
def limit_filter(app):
return RateLimitMiddleware(app, conf)
return limit_filter
|
apache-2.0
|
ErikTromm1/Erik
|
py/openage/convert/mediafile.py
|
46
|
10925
|
# media files conversion stuff
from .colortable import ColorTable, PlayerColorTable
from collections import defaultdict
from . import dataformat
from .drs import DRS
from . import filelist
from .hardcoded import termcolors
import os
import os.path
import pickle
from string import Template
import subprocess
from .texture import Texture
from . import util
from .util import file_write, dbg, ifdbg, set_write_dir, set_read_dir, set_verbosity, file_get_path
asset_folder = "" #TODO: optimize out
dat_cache_file = "/tmp/empires2_x1_p1.dat.pickle"
class ExtractionRule:
"""
rule for matching media file names
"""
def __init__(self, rulestr):
drsname, fname = rulestr.split(':')
fnostr, fext = fname.split('.')
if drsname == '*':
drsname = None
if fnostr == '*':
fno = None
else:
fno = int(fnostr)
if fext == '*':
fext = None
self.drsname = drsname
self.fno = fno
self.fext = fext
def matches(self, drsname, fno, fext):
if self.drsname and self.drsname != drsname:
return False
if self.fno and self.fno != fno:
return False
if self.fext and self.fext != fext:
return False
return True
def media_convert(args):
    #assume all files should be extracted when nothing is specified.
if not args.extract:
args.extract.append('*:*.*')
extraction_rules = [ ExtractionRule(e) for e in args.extract ]
#set path in utility class
dbg("setting age2 input directory to " + args.srcdir, 1)
set_read_dir(args.srcdir)
drsfiles = {
"graphics": DRS("Data/graphics.drs"),
"interface": DRS("Data/interfac.drs"),
"sounds0": DRS("Data/sounds.drs"),
"sounds1": DRS("Data/sounds_x1.drs"),
"gamedata1": DRS("Data/gamedata_x1.drs"),
"gamedata2": DRS("Data/gamedata_x1_p1.drs"),
"terrain": DRS("Data/terrain.drs")
}
#gamedata.drs does not exist in HD edition, but its contents are
#in gamedata_x1.drs instead, so we can ignore this file if it doesn't exist
if os.path.isfile(file_get_path("Data/gamedata.drs")):
drsfiles["gamedata0"] = DRS("Data/gamedata.drs")
#this is the ingame color palette file id, 256 color lookup for all graphics pixels
palette_id = 50500
palette = ColorTable(drsfiles["interface"].get_file_data('bin', palette_id))
#metadata dumping output format, more to come?
output_formats = ("csv",)
termcolortable = ColorTable(termcolors.urxvtcoltable)
#write mode is disabled by default, unless destdir is set
#saving files is disabled by default
write_enabled = False
if args.output:
from .slp import SLP
write_enabled = True
dbg("setting write dir to " + args.output, 1)
set_write_dir(args.output)
player_palette = PlayerColorTable(palette)
if args.extrafiles:
palette.save_visualization('info/colortable.pal.png')
player_palette.save_visualization('info/playercolortable.pal.png')
from . import blendomatic
#HD Edition has a blendomatic_x1.dat in addition to its new blendomatic.dat
#blendomatic_x1.dat is the same file as AoK:TC's blendomatic.dat, and TC does not have
#blendomatic.dat, so we try _x1 first and fall back to the AoK:TC way if it does not exist
blend_file = "Data/blendomatic_x1.dat"
if not os.path.isfile(file_get_path(blend_file)):
blend_file = "Data/blendomatic.dat"
blend_data = blendomatic.Blendomatic(blend_file)
blend_data.save(os.path.join(asset_folder, "blendomatic.dat/"), output_formats)
from .stringresource import StringResource
stringres = StringResource()
#AoK:TC uses .DLL files for its string resources,
#HD uses plaintext files
if os.path.isfile(file_get_path("language.dll")):
from .pefile import PEFile
stringres.fill_from(PEFile("language.dll"))
stringres.fill_from(PEFile("language_x1.dll"))
stringres.fill_from(PEFile("language_x1_p1.dll"))
#stringres.fill_from(PEFile("Games/Forgotten Empires/Data/language_x1_p1.dll"))
else:
from .hdlanguagefile import HDLanguageFile
for lang in os.listdir(file_get_path("Bin")):
langfile = "Bin/%s/%s-language.txt" % (lang, lang)
            #there are some "base language" files in HD that we don't need
#and only the dir for the language that's currently in use contains a language file
if os.path.isdir(file_get_path("Bin/%s" % (lang))) and os.path.isfile(file_get_path(langfile)):
stringres.fill_from(HDLanguageFile(langfile, lang))
#TODO: transform and cleanup the read strings... (strip html, insert formatchars, ...)
#create the dump for the dat file
from .gamedata import empiresdat
datfile_name = "empires2_x1_p1.dat"
#try to use cached version?
parse_empiresdat = False
if args.use_dat_cache:
dbg("trying to use cache file %s..." % (dat_cache_file), lvl=1)
try:
with open(dat_cache_file, "rb") as f:
gamedata = pickle.load(f)
dbg("could successfully load cached gamedata!", lvl=1)
except FileNotFoundError as err:
parse_empiresdat = True
if not args.use_dat_cache or parse_empiresdat:
datfile = empiresdat.EmpiresDatGzip("Data/%s" % datfile_name)
gamedata = empiresdat.EmpiresDatWrapper()
if args.extrafiles:
datfile.raw_dump('raw/empires2x1p1.raw')
dbg("reading main data file %s..." % (datfile_name), lvl=1)
gamedata.read(datfile.content, 0)
#store the datfile serialization for caching
with open(dat_cache_file, "wb") as f:
pickle.dump(gamedata, f)
#modify the read contents of datfile
dbg("repairing some values in main data file %s..." % (datfile_name), lvl=1)
from . import fix_data
gamedata.empiresdat[0] = fix_data.fix_data(gamedata.empiresdat[0])
#dbg("transforming main data file %s..." % (datfile_name), lvl=1)
#TODO: data transformation nao! (merge stuff, etcetc)
dbg("formatting output data...", lvl=1)
data_formatter = dataformat.DataFormatter()
#dump metadata information
data_dump = list()
data_dump += blend_data.dump("blending_modes")
data_dump += player_palette.dump("player_palette_%d" % palette_id)
data_dump += termcolortable.dump("termcolors")
data_dump += stringres.dump("string_resources")
data_formatter.add_data(data_dump)
#dump gamedata datfile data
gamedata_dump = gamedata.dump("gamedata")
data_formatter.add_data(gamedata_dump[0], prefix="gamedata/")
output_data = data_formatter.export(output_formats)
#save the meta files
dbg("saving output data files...", lvl=1)
util.file_write_multi(output_data, file_prefix=asset_folder)
file_list = defaultdict(lambda: list())
media_files_extracted = 0
sound_list = filelist.SoundList()
#iterate over all available files in the drs, check whether they should be extracted
for drsname, drsfile in drsfiles.items():
for file_extension, file_id in drsfile.files:
if not any(er.matches(drsname, file_id, file_extension) for er in extraction_rules):
continue
#append this file to the list result
if args.list_files:
file_list[file_id].append((drsfile.fname, file_extension))
continue
#generate output filename where data will be stored in
if write_enabled:
fbase = os.path.join(asset_folder, drsfile.fname, str(file_id))
fname = "%s.%s" % (fbase, file_extension)
dbg("Extracting to %s..." % (fname), 2)
file_data = drsfile.get_file_data(file_extension, file_id)
else:
continue
if file_extension == 'slp':
s = SLP(file_data)
out_file_tmp = "%s: %d.%s" % (drsname, file_id, file_extension)
dbg("%s -> %s -> generating atlas" % (out_file_tmp, fname), 1)
#create exportable texture from the slp
texture = Texture(s, palette)
#save the image and the corresponding metadata file
texture.save(fname, output_formats)
elif file_extension == 'wav':
sound_filename = fname
wav_output_file = file_get_path(fname, write=True)
file_write(wav_output_file, file_data)
if not args.no_opus:
file_extension = "opus"
sound_filename = "%s.%s" % (fbase, file_extension)
opus_output_file = file_get_path(sound_filename, write=True)
                    #opusenc invocation (TODO: ffmpeg?)
opus_convert_call = ['opusenc', wav_output_file, opus_output_file]
dbg("opus convert: %s -> %s ..." % (fname, sound_filename), 1)
oc = subprocess.Popen(opus_convert_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
oc_out, oc_err = oc.communicate()
if ifdbg(2):
oc_out = oc_out.decode("utf-8")
oc_err = oc_err.decode("utf-8")
dbg(oc_out + "\n" + oc_err, 2)
#remove original wave file
os.remove(wav_output_file)
#TODO: this is redundant here, but we need to strip the assets/ part..
filelist_fname = "%s.%s" % (os.path.join(drsfile.fname, str(file_id)), file_extension)
sound_list.add_sound(file_id, filelist_fname, file_extension)
else:
#format does not require conversion, store it as plain blob
output_file = file_get_path(fname, write=True)
file_write(output_file, file_data)
media_files_extracted += 1
if write_enabled:
sound_formatter = dataformat.DataFormatter()
sound_formatter.add_data(sound_list.dump())
util.file_write_multi(sound_formatter.export(output_formats), file_prefix=asset_folder)
dbg("media files extracted: %d" % (media_files_extracted), 0)
#was a file listing requested?
if args.list_files:
for idx, f in file_list.items():
print("%d = [ %s ]" % (idx, ", ".join(
"%s/%d.%s" % (file_name, idx, file_extension) for file_name, file_extension in f)))
|
gpl-3.0
|
thomdixon/elasticsearch-py
|
docs/conf.py
|
9
|
8459
|
# -*- coding: utf-8 -*-
#
# Elasticsearch documentation build configuration file, created by
# sphinx-quickstart on Mon May 6 15:38:41 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Elasticsearch'
copyright = u'2013, Honza Král'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6.0'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Elasticsearchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Elasticsearch.tex', u'Elasticsearch Documentation',
u'Honza Král', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'elasticsearch', u'Elasticsearch Documentation',
[u'Honza Král'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Elasticsearch', u'Elasticsearch Documentation',
u'Honza Král', 'Elasticsearch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
|
kenxwagner/PythonPlay
|
Project/webscrap/websc/Lib/site-packages/setuptools/command/install_egg_info.py
|
412
|
2203
|
from distutils import log, dir_util
import os
from setuptools import Command
from setuptools import namespaces
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(namespaces.Installer, Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',
('install_dir', 'install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = []
def run(self):
self.run_command('egg_info')
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(
self.copytree, (), "Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
|
mit
|
danicampora/micropython
|
tests/micropython/viper_misc.py
|
37
|
2637
|
import micropython
# viper function taking and returning ints
@micropython.viper
def viper_int(x:int, y:int) -> int:
return x + y + 3
print(viper_int(1, 2))
# viper function taking and returning objects
@micropython.viper
def viper_object(x:object, y:object) -> object:
return x + y
print(viper_object(1, 2))
# return None as non-object (should return 0)
@micropython.viper
def viper_ret_none() -> int:
return None
print(viper_ret_none())
# return Ellipsis as object
@micropython.viper
def viper_ret_ellipsis() -> object:
return ...
print(viper_ret_ellipsis())
# 3 args
@micropython.viper
def viper_3args(a:int, b:int, c:int) -> int:
return a + b + c
print(viper_3args(1, 2, 3))
# 4 args
@micropython.viper
def viper_4args(a:int, b:int, c:int, d:int) -> int:
return a + b + c + d
# viper call with 4 args not yet supported
#print(viper_4args(1, 2, 3, 4))
# a local (should have automatic type int)
@micropython.viper
def viper_local(x:int) -> int:
y = 4
return x + y
print(viper_local(3))
# without type annotation, types should default to object
@micropython.viper
def viper_no_annotation(x, y):
return x * y
print(viper_no_annotation(4, 5))
# unsigned ints
@micropython.viper
def viper_uint() -> uint:
return uint(-1)
import sys
print(viper_uint() == (sys.maxsize << 1 | 1))
# a for loop
@micropython.viper
def viper_for(a:int, b:int) -> int:
total = 0
for x in range(a, b):
total += x
return total
print(viper_for(10, 10000))
# accessing a global
@micropython.viper
def viper_access_global():
global gl
gl = 1
return gl
print(viper_access_global(), gl)
# calling print with object and int types
@micropython.viper
def viper_print(x, y:int):
print(x, y + 1)
viper_print(1, 2)
# convert constants to objects in tuple
@micropython.viper
def viper_tuple_consts(x):
return (x, 1, False, True)
print(viper_tuple_consts(0))
# making a tuple from an object and an int
@micropython.viper
def viper_tuple(x, y:int):
return (x, y + 1)
print(viper_tuple(1, 2))
# making a list from an object and an int
@micropython.viper
def viper_list(x, y:int):
return [x, y + 1]
print(viper_list(1, 2))
# making a set from an object and an int
@micropython.viper
def viper_set(x, y:int):
return {x, y + 1}
print(sorted(list(viper_set(1, 2))))
# raising an exception
@micropython.viper
def viper_raise(x:int):
raise OSError(x)
try:
viper_raise(1)
except OSError as e:
print(repr(e))
# calling GC after defining the function
@micropython.viper
def viper_gc() -> int:
return 1
print(viper_gc())
import gc
gc.collect()
print(viper_gc())
|
mit
|
hchen1202/django-react
|
virtualenv/lib/python3.6/site-packages/django/contrib/sessions/backends/cached_db.py
|
118
|
2813
|
"""
Cached, database-backed sessions.
"""
import logging
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import caches
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
"""
Implements cached, database backed sessions.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
def load(self):
try:
data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
# Duplicate DBStore.load, because we need to keep track
# of the expiry date to set it properly in the cache.
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
data = self.decode(s.session_data)
self._cache.set(self.cache_key, data, self.get_expiry_age(expiry=s.expire_date))
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
data = {}
return data
def exists(self, session_key):
if session_key and (self.cache_key_prefix + session_key) in self._cache:
return True
return super(SessionStore, self).exists(session_key)
def save(self, must_create=False):
super(SessionStore, self).save(must_create)
self._cache.set(self.cache_key, self._session, self.get_expiry_age())
def delete(self, session_key=None):
super(SessionStore, self).delete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete(self.session_key)
self._session_key = None
|
mit
|
krux/adspygoogle
|
examples/adspygoogle/dfp/v201204/create_teams.py
|
2
|
1659
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates new teams.
To determine which teams exist, run get_all_teams.py.
Tags: TeamService.createTeams
"""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If the module was installed via the "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v201204')
# Create team objects.
teams = []
for i in xrange(5):
team = {
'name': 'Team #%d' % i,
'hasAllCompanies': 'false',
'hasAllInventory': 'false'
}
teams.append(team)
# Add Teams.
teams = team_service.CreateTeams(teams)
# Display results.
for team in teams:
print ('Team with ID \'%s\' and name \'%s\' was created.'
% (team['id'], team['name']))
|
apache-2.0
|
monikasulik/django-oscar
|
tests/integration/address/form_tests.py
|
39
|
1320
|
from django.test import TestCase
from oscar.apps.address import models, forms
from oscar.core.compat import get_user_model
from oscar.test.factories import UserFactory
class TestUserAddressForm(TestCase):
def setUp(self):
self.user = UserFactory()
self.country = models.Country.objects.create(
iso_3166_1_a2='GB', name="UNITED KINGDOM")
def test_merges_addresses_with_same_hash(self):
data = {
'user': self.user,
'first_name': "Matus",
'last_name': "Moravcik",
'line1': "1 Egg Street",
'line4': "London",
'postcode': "N12 9RE",
'country': self.country}
# Create two addresses, which are slightly different
models.UserAddress.objects.create(**data)
other = data.copy()
other['first_name'] = 'Izidor'
duplicate = models.UserAddress.objects.create(**other)
# Edit duplicate to be same as original and check that the two
# addresses are merged when the form saves.
post_data = data.copy()
post_data['country'] = self.country.iso_3166_1_a2
form = forms.UserAddressForm(self.user, post_data, instance=duplicate)
self.assertFalse(form.is_valid())
self.assertTrue(len(form.errors['__all__']) > 0)
|
bsd-3-clause
|
dweinstein/finsky
|
finsky/protos/play_survey_pb2.py
|
2
|
8956
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: play_survey.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='play_survey.proto',
package='PlaySurvey',
syntax='proto2',
serialized_pb=_b('\n\x11play_survey.proto\x12\nPlaySurvey\x1a\x0c\x63ommon.proto\"Z\n\rSurveyContent\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08question\x18\x02 \x01(\t\x12(\n\x06\x61nswer\x18\x03 \x03(\x0b\x32\x18.PlaySurvey.SurveyAnswer\"d\n\x06Survey\x12\n\n\x02id\x18\x01 \x01(\t\x12\"\n\x06prompt\x18\x02 \x01(\x0b\x32\x12.PlaySurvey.Prompt\x12*\n\x07\x63ontent\x18\x03 \x01(\x0b\x32\x19.PlaySurvey.SurveyContent\"P\n\x06Prompt\x12\x12\n\npromptText\x18\x01 \x01(\t\x12\x18\n\x10\x61\x63\x63\x65ptButtonText\x18\x02 \x01(\t\x12\x18\n\x10rejectButtonText\x18\x03 \x01(\t\"L\n\x0cSurveyAnswer\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1b\n\x04icon\x18\x02 \x01(\x0b\x32\r.Common.Image\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\tB.\n com.google.android.finsky.protosB\nPlaySurvey')
,
dependencies=[common__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SURVEYCONTENT = _descriptor.Descriptor(
name='SurveyContent',
full_name='PlaySurvey.SurveyContent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='PlaySurvey.SurveyContent.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='question', full_name='PlaySurvey.SurveyContent.question', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='answer', full_name='PlaySurvey.SurveyContent.answer', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=137,
)
_SURVEY = _descriptor.Descriptor(
name='Survey',
full_name='PlaySurvey.Survey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PlaySurvey.Survey.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prompt', full_name='PlaySurvey.Survey.prompt', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='content', full_name='PlaySurvey.Survey.content', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=239,
)
_PROMPT = _descriptor.Descriptor(
name='Prompt',
full_name='PlaySurvey.Prompt',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='promptText', full_name='PlaySurvey.Prompt.promptText', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acceptButtonText', full_name='PlaySurvey.Prompt.acceptButtonText', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rejectButtonText', full_name='PlaySurvey.Prompt.rejectButtonText', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=241,
serialized_end=321,
)
_SURVEYANSWER = _descriptor.Descriptor(
name='SurveyAnswer',
full_name='PlaySurvey.SurveyAnswer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PlaySurvey.SurveyAnswer.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='icon', full_name='PlaySurvey.SurveyAnswer.icon', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='PlaySurvey.SurveyAnswer.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=323,
serialized_end=399,
)
_SURVEYCONTENT.fields_by_name['answer'].message_type = _SURVEYANSWER
_SURVEY.fields_by_name['prompt'].message_type = _PROMPT
_SURVEY.fields_by_name['content'].message_type = _SURVEYCONTENT
_SURVEYANSWER.fields_by_name['icon'].message_type = common__pb2._IMAGE
DESCRIPTOR.message_types_by_name['SurveyContent'] = _SURVEYCONTENT
DESCRIPTOR.message_types_by_name['Survey'] = _SURVEY
DESCRIPTOR.message_types_by_name['Prompt'] = _PROMPT
DESCRIPTOR.message_types_by_name['SurveyAnswer'] = _SURVEYANSWER
SurveyContent = _reflection.GeneratedProtocolMessageType('SurveyContent', (_message.Message,), dict(
DESCRIPTOR = _SURVEYCONTENT,
__module__ = 'play_survey_pb2'
# @@protoc_insertion_point(class_scope:PlaySurvey.SurveyContent)
))
_sym_db.RegisterMessage(SurveyContent)
Survey = _reflection.GeneratedProtocolMessageType('Survey', (_message.Message,), dict(
DESCRIPTOR = _SURVEY,
__module__ = 'play_survey_pb2'
# @@protoc_insertion_point(class_scope:PlaySurvey.Survey)
))
_sym_db.RegisterMessage(Survey)
Prompt = _reflection.GeneratedProtocolMessageType('Prompt', (_message.Message,), dict(
DESCRIPTOR = _PROMPT,
__module__ = 'play_survey_pb2'
# @@protoc_insertion_point(class_scope:PlaySurvey.Prompt)
))
_sym_db.RegisterMessage(Prompt)
SurveyAnswer = _reflection.GeneratedProtocolMessageType('SurveyAnswer', (_message.Message,), dict(
DESCRIPTOR = _SURVEYANSWER,
__module__ = 'play_survey_pb2'
# @@protoc_insertion_point(class_scope:PlaySurvey.SurveyAnswer)
))
_sym_db.RegisterMessage(SurveyAnswer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n com.google.android.finsky.protosB\nPlaySurvey'))
# @@protoc_insertion_point(module_scope)
|
mit
|
evgchz/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
|
254
|
2253
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
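# For example, with analyzer='char' and ngram_range=(1, 3) the word "the"
# is mapped to the character n-grams 't', 'h', 'e', 'th', 'he', 'the',
# which serve as the language 'fingerprints' described above.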
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
wolfmanstout/dragonfly
|
dragonfly/actions/keyboard/_pynput.py
|
2
|
8733
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
This file implements a keyboard interface using the *pynput* Python
package. This implementation is used for Linux (X11) and Mac OS (Darwin).
"""
import logging
import sys
import time
from pynput.keyboard import Controller, KeyCode, Key
from ._base import BaseKeyboard, Typeable as BaseTypeable
class Typeable(BaseTypeable):
""" Typeable class for pynput. """
_log = logging.getLogger("keyboard")
def __init__(self, code, modifiers=(), name=None, is_text=False):
BaseTypeable.__init__(self, code, modifiers, name, is_text)
class SafeKeyCode(object):
"""
Class to safely get key codes from pynput.
"""
def __getattr__(self, name):
# Get the key code from pynput, returning KeyCode(vk=-1, char=name)
# if the key name isn't present.
# Keys are undefined on some platforms, e.g. "pause" on Darwin.
return getattr(Key, name, KeyCode(vk=-1, char=name))
virtual_keys = SafeKeyCode()
class BaseKeySymbols(object):
""" Base key symbols for pynput. """
# Whitespace and editing keys
RETURN = virtual_keys.enter
TAB = virtual_keys.tab
SPACE = virtual_keys.space
BACK = virtual_keys.backspace
DELETE = virtual_keys.delete
# Main modifier keys
SHIFT = virtual_keys.shift
CONTROL = virtual_keys.ctrl
ALT = virtual_keys.alt
# Right modifier keys
RSHIFT = virtual_keys.shift_r
RCONTROL = virtual_keys.ctrl_r
RALT = virtual_keys.alt_r
# Special keys
ESCAPE = virtual_keys.esc
INSERT = virtual_keys.insert
PAUSE = virtual_keys.pause
LSUPER = virtual_keys.cmd_l
RSUPER = virtual_keys.cmd_r
APPS = virtual_keys.menu
SNAPSHOT = virtual_keys.print_screen
# Lock keys
SCROLL_LOCK = virtual_keys.scroll_lock
NUM_LOCK = virtual_keys.num_lock
CAPS_LOCK = virtual_keys.caps_lock
# Navigation keys
UP = virtual_keys.up
DOWN = virtual_keys.down
LEFT = virtual_keys.left
RIGHT = virtual_keys.right
PAGE_UP = virtual_keys.page_up
PAGE_DOWN = virtual_keys.page_down
HOME = virtual_keys.home
END = virtual_keys.end
# Number pad keys
# pynput currently only exposes these for Windows, so we'll map them to
# equivalent characters and numbers instead.
MULTIPLY = KeyCode(char="*")
ADD = KeyCode(char="+")
SEPARATOR = KeyCode(char=".") # this is locale-dependent.
SUBTRACT = KeyCode(char="-")
DECIMAL = KeyCode(char=".")
DIVIDE = KeyCode(char="/")
NUMPAD0 = KeyCode(char="0")
NUMPAD1 = KeyCode(char="1")
NUMPAD2 = KeyCode(char="2")
NUMPAD3 = KeyCode(char="3")
NUMPAD4 = KeyCode(char="4")
NUMPAD5 = KeyCode(char="5")
NUMPAD6 = KeyCode(char="6")
NUMPAD7 = KeyCode(char="7")
NUMPAD8 = KeyCode(char="8")
NUMPAD9 = KeyCode(char="9")
# Function keys
# F13-20 don't work on X11 with pynput because they are not usually
# part of the keyboard map.
F1 = virtual_keys.f1
F2 = virtual_keys.f2
F3 = virtual_keys.f3
F4 = virtual_keys.f4
F5 = virtual_keys.f5
F6 = virtual_keys.f6
F7 = virtual_keys.f7
F8 = virtual_keys.f8
F9 = virtual_keys.f9
F10 = virtual_keys.f10
F11 = virtual_keys.f11
F12 = virtual_keys.f12
F13 = virtual_keys.f13
F14 = virtual_keys.f14
F15 = virtual_keys.f15
F16 = virtual_keys.f16
F17 = virtual_keys.f17
F18 = virtual_keys.f18
F19 = virtual_keys.f19
F20 = virtual_keys.f20
class X11KeySymbols(BaseKeySymbols):
"""
Symbols for X11 from pynput.
This class includes extra symbols matching those that dragonfly's Win32
keyboard interface provides.
"""
# Number pad keys
# Retrieved from /usr/include/X11/keysymdef.h on Debian 9.
MULTIPLY = KeyCode.from_vk(0xffaa)
ADD = KeyCode.from_vk(0xffab)
SEPARATOR = KeyCode.from_vk(0xffac)
SUBTRACT = KeyCode.from_vk(0xffad)
DECIMAL = KeyCode.from_vk(0xffae)
DIVIDE = KeyCode.from_vk(0xffaf)
NUMPAD0 = KeyCode.from_vk(0xffb0)
NUMPAD1 = KeyCode.from_vk(0xffb1)
NUMPAD2 = KeyCode.from_vk(0xffb2)
NUMPAD3 = KeyCode.from_vk(0xffb3)
NUMPAD4 = KeyCode.from_vk(0xffb4)
NUMPAD5 = KeyCode.from_vk(0xffb5)
NUMPAD6 = KeyCode.from_vk(0xffb6)
NUMPAD7 = KeyCode.from_vk(0xffb7)
NUMPAD8 = KeyCode.from_vk(0xffb8)
NUMPAD9 = KeyCode.from_vk(0xffb9)
# Function keys F21-F24.
# Retrieved from /usr/include/X11/keysymdef.h on Debian 9.
# These keys don't work on X11 with pynput because they are not usually
# part of the keyboard map. They are set here to avoid some warnings
# and because the Windows keyboard supports them.
F21 = KeyCode.from_vk(0xffd1)
F22 = KeyCode.from_vk(0xffd2)
F23 = KeyCode.from_vk(0xffd3)
F24 = KeyCode.from_vk(0xffd4)
# Multimedia keys
# Retrieved from /usr/include/X11/XF86keysym.h on Debian 9.
# These should work on Debian-based distributions like Ubuntu, but
# might not work using different X11 server implementations because the
# symbols are vendor-specific.
# Any errors raised when typing these or any other keys will be caught
# and logged.
VOLUME_UP = KeyCode.from_vk(0x1008FF13)
VOLUME_DOWN = KeyCode.from_vk(0x1008FF11)
VOLUME_MUTE = KeyCode.from_vk(0x1008FF12)
MEDIA_NEXT_TRACK = KeyCode.from_vk(0x1008FF17)
MEDIA_PREV_TRACK = KeyCode.from_vk(0x1008FF16)
MEDIA_PLAY_PAUSE = KeyCode.from_vk(0x1008FF14)
BROWSER_BACK = KeyCode.from_vk(0x1008FF26)
BROWSER_FORWARD = KeyCode.from_vk(0x1008FF27)
class DarwinKeySymbols(BaseKeySymbols):
"""
Symbols for Darwin from pynput.
This class includes some extra symbols to prevent errors in
typeables.py.
All extras will be disabled (key code of -1).
"""
# Extra function keys.
F21 = virtual_keys.f21
F22 = virtual_keys.f22
F23 = virtual_keys.f23
F24 = virtual_keys.f24
# Multimedia keys.
VOLUME_UP = virtual_keys.volume_up
VOLUME_DOWN = virtual_keys.volume_down
VOLUME_MUTE = virtual_keys.volume_mute
MEDIA_NEXT_TRACK = virtual_keys.media_next_track
MEDIA_PREV_TRACK = virtual_keys.media_prev_track
MEDIA_PLAY_PAUSE = virtual_keys.media_play_pause
BROWSER_BACK = virtual_keys.browser_back
BROWSER_FORWARD = virtual_keys.browser_forward
class Keyboard(BaseKeyboard):
"""Static class wrapper around pynput.keyboard."""
_controller = Controller()
_log = logging.getLogger("keyboard")
@classmethod
def send_keyboard_events(cls, events):
"""
Send a sequence of keyboard events.
Positional arguments:
events -- a sequence of tuples of the form
(keycode, down, timeout), where
keycode (str|KeyCode): pynput key code.
down (boolean): True means the key will be pressed down,
False means the key will be released.
timeout (int): number of seconds to sleep after
the keyboard event.
"""
cls._log.debug("Keyboard.send_keyboard_events %r", events)
for event in events:
(key, down, timeout) = event
# Raise an error if the key is unsupported. 'key' can also be a
# string, e.g. "a", "b", "/", etc, but we don't check if those
# are valid.
if isinstance(key, KeyCode) and key.vk == -1:
raise ValueError("Unsupported key: %r" % key.char)
# Press/release the key, catching any errors.
try:
cls._controller.touch(key, down)
except Exception as e:
cls._log.exception("Failed to type key code %s: %s",
key, e)
# Sleep after the keyboard event if necessary.
if timeout:
time.sleep(timeout)
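# Illustrative call (example values): press and release "a" with no delay
# between the two events:
#
#   Keyboard.send_keyboard_events([(KeyCode(char="a"), True, 0),
#                                  (KeyCode(char="a"), False, 0)])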
@classmethod
def get_typeable(cls, char, is_text=False):
return Typeable(char, is_text=is_text)
|
lgpl-3.0
|
blacktear23/django
|
django/conf/project_template/settings.py
|
150
|
5005
|
# Django settings for {{ project_name }} project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
bsd-3-clause
|
ElijahCaine/GrindStone
|
grindstone/tests.py
|
1
|
6880
|
#!/usr/bin/env python
"""
File: test.py
Package: grindstone
Author: Elijah Caine
Description:
Test GrindStone lib functionality
"""
from lib import GrindStone
import unittest
import shutil
import os
class TestGrindStoneLibrary(unittest.TestCase):
def setUp(self):
# We're testing everything in a /tmp/* directory to avoid overwriting
# an existing .grindstone file
self.testing_path = '/tmp/grindstone_testing/'
try:
# Make directory for testing stuffs
os.mkdir(self.testing_path)
# If that directory exists
except FileExistsError:
# Blow everything away
shutil.rmtree(self.testing_path)
os.mkdir(self.testing_path)
# Move to the testing directory
os.chdir(self.testing_path)
self.cwd = os.getcwd()
def tearDown(self):
try:
# Remove the grindstone_path
os.remove(self.gs.grindstone_path)
except FileNotFoundError:
# It's okay, this is probably supposed to happen
pass
except AttributeError:
# self.gs is not found
pass
finally:
# Burn the testing files. Burn them with fire.
shutil.rmtree(self.testing_path)
def test_cwd_path(self):
os.mkdir('./t/')
os.chdir('./t/')
self.cwd = os.getcwd()
open('.grindstone', 'w').close()
self.gs = GrindStone(self.cwd)
self.assertEqual(self.gs.grindstone_path,\
os.path.realpath(self.gs.grindstone_filename))
def test_no_path_given(self):
with self.assertRaises(ValueError) as err:
self.gs = GrindStone()
self.assertEqual(str(err.exception), 'Path must not be None')
def test_add_one_complete_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('book1', 'read the book')
self.assertEqual(self.gs.get_tasks(), [{'book1': 'read the book'}])
self.gs.write_grindstone()
with open(self.gs.grindstone_path, 'r') as f:
file_contents = f.read()
self.assertTrue('"tasks": [{"book1": "read the book"}]'\
in file_contents)
def test_add_one_shallow_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookA')
self.assertEqual(self.gs.get_tasks(), [{'bookA': None}])
self.gs.write_grindstone()
with open(self.gs.grindstone_path, 'r') as f:
file_contents = f.read()
self.assertTrue('"tasks": [{"bookA": null}]' in file_contents)
def test_add_complete_tasks(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('book1', 'read the book')
self.gs.add_task('book2', 'read the other book')
self.assertEqual(self.gs.get_tasks(),\
[{'book1': 'read the book'},\
{'book2': 'read the other book'}])
self.gs.write_grindstone()
with open(self.gs.grindstone_path, 'r') as f:
file_contents = f.read()
self.assertTrue('"tasks": [{"book1": "read the book"}, '+
'{"book2": "read the other book"}]'\
in file_contents)
def test_open_grindstone(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', 'read the book')
self.gs.write_grindstone()
self.gs2 = GrindStone(self.cwd)
self.assertEqual(self.gs2.get_tasks(),\
[{'bookAlpha': 'read the book'}])
def test_open_modify_grindstone(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', 'read the book')
self.gs.write_grindstone()
self.gs2 = GrindStone(self.cwd)
self.gs2.add_task('bookBeta', 'This one matters less')
self.assertEqual(self.gs2.get_tasks(),\
[{'bookAlpha': 'read the book'},
{'bookBeta': 'This one matters less'}])
self.gs2.write_grindstone()
with open(self.gs2.grindstone_path, 'r') as f:
file_contents2 = f.read()
self.assertTrue('"tasks": [{"bookAlpha": "read the book"}, '+
'{"bookBeta": "This one matters less"}]'\
in file_contents2)
def test_add_empty_task(self):
self.gs = GrindStone(self.cwd)
with self.assertRaises(ValueError) as err:
self.gs.add_task()
self.assertEqual(str(err.exception),\
'Tasks `name` cannot be None')
self.assertEqual(self.gs.get_tasks(), [])
def test_add_task_with_no_name(self):
self.gs = GrindStone(self.cwd)
with self.assertRaises(ValueError) as err:
self.gs.add_task(desc='foo')
self.assertEqual(str(err.exception),\
'Tasks `name` cannot be None')
self.assertEqual(self.gs.get_tasks(), [])
def test_fetch_empty_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', None)
self.gs.write_grindstone()
self.gs2 = GrindStone(self.cwd)
self.assertEqual(self.gs2.get_task('bookAlpha'),
{'bookAlpha': None})
def test_fetch_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', 'The Most Awesome Book')
self.gs.write_grindstone()
self.gs2 = GrindStone(self.cwd)
self.assertEqual(self.gs2.get_task('bookAlpha'),
{'bookAlpha': 'The Most Awesome Book'})
def test_non_existent_get_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', 'The Most Awesome Book')
self.gs.write_grindstone()
self.gs2 = GrindStone(self.cwd)
self.assertEqual(self.gs2.get_task('bookOmega'), None)
def test_delete_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookAlpha', 'The Most Awesome Book')
self.gs.delete_task('bookAlpha')
self.assertEqual(self.gs.get_task('bookAlpha'), None)
self.assertEqual(self.gs.get_tasks(), [])
def test_delete_non_existent_task(self):
self.gs = GrindStone(self.cwd)
self.assertFalse(self.gs.delete_task('bookAlpha'))
self.assertEqual(self.gs.get_tasks(), [])
def test_fail_to_add_double_task(self):
self.gs = GrindStone(self.cwd)
self.gs.add_task('bookA')
self.assertEqual(self.gs.get_tasks(), [{'bookA': None}])
with self.assertRaises(ValueError) as err:
self.gs.add_task('bookA')
self.assertEqual(str(err.exception), 'Task already exists')
self.assertEqual(self.gs.get_tasks(), [{'bookA': None}])
if __name__ == '__main__':
unittest.main()
|
mit
|
pozdnyakov/chromium-crosswalk
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_XML.py
|
33
|
24745
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
doc = libxml2.readFile( file_name, None, libxml2.XML_PARSE_XINCLUDE + libxml2.XML_PARSE_NOBLANKS + libxml2.XML_PARSE_DTDVALID + libxml2.XML_PARSE_DTDATTR + libxml2.XML_PARSE_DTDLOAD + libxml2.XML_PARSE_NOENT )
ret = doc.xincludeProcess()
if not factory:
factory = gl_item_factory()
api = factory.create_item( "api", None, None )
api.process_element( doc )
# After the XML has been processed, we need to go back and assign
# dispatch offsets to the functions that request that their offsets
# be assigned by the scripts. Typically this means all functions
# that are not part of the ABI.
for func in api.functionIterateByCategory():
if func.assign_offset:
func.offset = api.next_offset;
api.next_offset += 1
doc.freeDoc()
return api
def is_attr_true( element, name ):
"""Read a name value from an element's attributes.
The value read from the attribute list must be either 'true' or
'false'. If the value is 'false', zero will be returned. If the
value is 'true', non-zero will be returned. An exception will be
raised for any other value."""
value = element.nsProp( name, None )
if value == "true":
return 1
elif value == "false":
return 0
else:
raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
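# For example, an element declared as <function name="glFlush" static_dispatch="true">
# makes is_attr_true(element, "static_dispatch") return 1; "false" returns 0 and any
# other value raises RuntimeError.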
class gl_print_base:
"""Base class of all API pretty-printers.
In the model-view-controller pattern, this is the view. Any derived
class will want to over-ride the printBody, printRealHeader, and
printRealFooter methods. Some derived classes may want to over-ride
printHeader and printFooter, or even Print (though this is unlikely).
"""
def __init__(self):
# Name of the script that is generating the output file.
# Every derived class should set this to the name of its
# source file.
self.name = "a"
# License on the *generated* source file. This may differ
# from the license on the script that is generating the file.
# Every derived class should set this to some reasonable
# value.
#
# See license.py for an example of a reasonable value.
self.license = "The license for this file is unspecified."
# The header_tag is the name of the C preprocessor define
# used to prevent multiple inclusion. Typically only
# generated C header files need this to be set. Setting it
# causes code to be generated automatically in printHeader
# and printFooter.
self.header_tag = None
# List of file-private defines that must be undefined at the
# end of the file. This can be used in header files to define
# names for use in the file, then undefine them at the end of
# the header file.
self.undef_list = []
return
def Print(self, api):
self.printHeader()
self.printBody(api)
self.printFooter()
return
def printHeader(self):
"""Print the header associated with all files and call the printRealHeader method."""
print '/* DO NOT EDIT - This file generated automatically by %s script */' \
% (self.name)
print ''
print '/*'
print ' * ' + self.license.replace('\n', '\n * ')
print ' */'
print ''
if self.header_tag:
print '#if !defined( %s )' % (self.header_tag)
print '# define %s' % (self.header_tag)
print ''
self.printRealHeader();
return
def printFooter(self):
"""Print the header associated with all files and call the printRealFooter method."""
self.printRealFooter()
if self.undef_list:
print ''
for u in self.undef_list:
print "# undef %s" % (u)
if self.header_tag:
print ''
print '#endif /* !defined( %s ) */' % (self.header_tag)
def printRealHeader(self):
"""Print the "real" header for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printRealFooter(self):
"""Print the "real" footer for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printPure(self):
"""Conditionally define `PURE' function attribute.
Conditionally defines a preprocessor macro `PURE' that wraps
GCC's `pure' function attribute. The conditional code can be
easily adapted to other compilers that support a similar
feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("PURE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define PURE __attribute__((pure))
# else
# define PURE
# endif"""
return
def printFastcall(self):
"""Conditionally define `FASTCALL' function attribute.
Conditionally defines a preprocessor macro `FASTCALL' that
wraps GCC's `fastcall' function attribute. The conditional
code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("FASTCALL")
print """# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif"""
return
def printVisibility(self, S, s):
"""Conditionally define visibility function attribute.
Conditionally defines a preprocessor macro name S that wraps
GCC's visibility function attribute. The visibility used is
the parameter s. The conditional code can be easily adapted
to other compilers that support a similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append(S)
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) && defined(__ELF__)
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S)
return
def printNoinline(self):
"""Conditionally define `NOINLINE' function attribute.
Conditionally defines a preprocessor macro `NOINLINE' that
wraps GCC's `noinline' function attribute. The conditional
code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("NOINLINE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif"""
return
def real_function_name(element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if alias:
return alias
else:
return name
def real_category_name(c):
if re.compile("[1-9][0-9]*[.][0-9]+").match(c):
return "GL_VERSION_" + c.replace(".", "_")
else:
return c
def classify_category(name, number):
"""Based on the category name and number, select a numerical class for it.
Categories are divided into four classes numbered 0 through 3. The
classes are:
0. Core GL versions, sorted by version number.
1. ARB extensions, sorted by extension number.
2. Non-ARB extensions, sorted by extension number.
3. Un-numbered extensions, sorted by extension name.
"""
try:
core_version = float(name)
except Exception,e:
core_version = 0.0
if core_version > 0.0:
cat_type = 0
key = name
elif name.startswith("GL_ARB_") or name.startswith("GLX_ARB_") or name.startswith("WGL_ARB_"):
cat_type = 1
key = int(number)
else:
if number != None:
cat_type = 2
key = int(number)
else:
cat_type = 3
key = name
return [cat_type, key]
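# Illustrative results (extension numbers assumed for the example):
#   classify_category("1.5", None) -> [0, "1.5"] (core GL version)
#   classify_category("GL_ARB_multitexture", "1") -> [1, 1] (ARB extension)
#   classify_category("GL_EXT_texture", "4") -> [2, 4] (numbered non-ARB extension)
#   classify_category("GL_OES_read_format", None) -> [3, "GL_OES_read_format"]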
def create_parameter_string(parameters, include_names):
"""Create a parameter string from a list of gl_parameters."""
list = []
for p in parameters:
if p.is_padding:
continue
if include_names:
list.append( p.string() )
else:
list.append( p.type_string() )
if len(list) == 0: list = ["void"]
return string.join(list, ", ")
class gl_item:
def __init__(self, element, context):
self.context = context
self.name = element.nsProp( "name", None )
self.category = real_category_name( element.parent.nsProp( "name", None ) )
return
class gl_type( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.size = int( element.nsProp( "size", None ), 0 )
te = typeexpr.type_expression( None )
tn = typeexpr.type_node()
tn.size = int( element.nsProp( "size", None ), 0 )
tn.integer = not is_attr_true( element, "float" )
tn.unsigned = is_attr_true( element, "unsigned" )
tn.name = "GL" + self.name
te.set_base_type_node( tn )
self.type_expr = te
return
def get_type_expression(self):
return self.type_expr
class gl_enum( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.value = int( element.nsProp( "value", None ), 0 )
temp = element.nsProp( "count", None )
if not temp or temp == "?":
self.default_count = -1
else:
try:
c = int(temp)
except Exception,e:
raise RuntimeError('Invalid count value "%s" for enum "%s" when an integer was expected.' % (temp, self.name))
self.default_count = c
return
def priority(self):
"""Calculate a 'priority' for this enum name.
When an enum is looked up by number, there may be many
possible names, but only one is the 'preferred' name.  The
priority is used to select which name is the 'best'.
Highest precedence is given to core GL names.  ARB extension
names have the next highest, followed by EXT extension names.
Vendor extension names are the lowest.
"""
if self.name.endswith( "_BIT" ):
bias = 1
else:
bias = 0
if self.category.startswith( "GL_VERSION_" ):
priority = 0
elif self.category.startswith( "GL_ARB_" ):
priority = 2
elif self.category.startswith( "GL_EXT_" ):
priority = 4
else:
priority = 6
return priority + bias
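# Illustrative values: an enum from category GL_VERSION_1_1 gets priority 0
# (1 if its name ends in "_BIT"), GL_ARB_* enums get 2 or 3, GL_EXT_* enums
# get 4 or 5, and vendor extensions get 6 or 7; the lowest value is the
# preferred name when several names share one enum number.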
class gl_parameter:
def __init__(self, element, context):
self.name = element.nsProp( "name", None )
ts = element.nsProp( "type", None )
self.type_expr = typeexpr.type_expression( ts, context )
temp = element.nsProp( "variable_param", None )
if temp:
self.count_parameter_list = temp.split( ' ' )
else:
self.count_parameter_list = []
# The count tag can be either a numeric string or the name of
# a variable. If it is the name of a variable, the int(c)
# statement will throw an exception, and the except block will
# take over.
c = element.nsProp( "count", None )
try:
count = int(c)
self.count = count
self.counter = None
except Exception,e:
count = 1
self.count = 0
self.counter = c
self.count_scale = int(element.nsProp( "count_scale", None ))
elements = (count * self.count_scale)
if elements == 1:
elements = 0
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (before)*/' % (self.name, self.type_expr.get_stack_size())
# print '/* # elements = %u */' % (elements)
self.type_expr.set_elements( elements )
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (after) */' % (self.name, self.type_expr.get_stack_size())
self.is_client_only = is_attr_true( element, 'client_only' )
self.is_counter = is_attr_true( element, 'counter' )
self.is_output = is_attr_true( element, 'output' )
# Pixel data has special parameters.
self.width = element.nsProp('img_width', None)
self.height = element.nsProp('img_height', None)
self.depth = element.nsProp('img_depth', None)
self.extent = element.nsProp('img_extent', None)
self.img_xoff = element.nsProp('img_xoff', None)
self.img_yoff = element.nsProp('img_yoff', None)
self.img_zoff = element.nsProp('img_zoff', None)
self.img_woff = element.nsProp('img_woff', None)
self.img_format = element.nsProp('img_format', None)
self.img_type = element.nsProp('img_type', None)
self.img_target = element.nsProp('img_target', None)
self.img_pad_dimensions = is_attr_true( element, 'img_pad_dimensions' )
self.img_null_flag = is_attr_true( element, 'img_null_flag' )
self.img_send_null = is_attr_true( element, 'img_send_null' )
self.is_padding = is_attr_true( element, 'padding' )
return
def compatible(self, other):
return 1
def is_array(self):
return self.is_pointer()
def is_pointer(self):
return self.type_expr.is_pointer()
def is_image(self):
if self.width:
return 1
else:
return 0
def is_variable_length(self):
return len(self.count_parameter_list) or self.counter
def is_64_bit(self):
count = self.type_expr.get_element_count()
if count:
if (self.size() / count) == 8:
return 1
else:
if self.size() == 8:
return 1
return 0
def string(self):
return self.type_expr.original_string + " " + self.name
def type_string(self):
return self.type_expr.original_string
def get_base_type_string(self):
return self.type_expr.get_base_name()
def get_dimensions(self):
if not self.width:
return [ 0, "0", "0", "0", "0" ]
dim = 1
w = self.width
h = "1"
d = "1"
e = "1"
if self.height:
dim = 2
h = self.height
if self.depth:
dim = 3
d = self.depth
if self.extent:
dim = 4
e = self.extent
return [ dim, w, h, d, e ]
def get_stack_size(self):
return self.type_expr.get_stack_size()
def size(self):
if self.is_image():
return 0
else:
return self.type_expr.get_element_size()
def get_element_count(self):
c = self.type_expr.get_element_count()
if c == 0:
return 1
return c
def size_string(self, use_parens = 1):
s = self.size()
if self.counter or self.count_parameter_list:
list = [ "compsize" ]
if self.counter and self.count_parameter_list:
list.append( self.counter )
elif self.counter:
list = [ self.counter ]
if s > 1:
list.append( str(s) )
if len(list) > 1 and use_parens :
return "(%s)" % (string.join(list, " * "))
else:
return string.join(list, " * ")
elif self.is_image():
return "compsize"
else:
return str(s)
def format_string(self):
if self.type_expr.original_string == "GLenum":
return "0x%x"
else:
return self.type_expr.format_string()
class gl_function( gl_item ):
def __init__(self, element, context):
self.context = context
self.name = None
self.entry_points = []
self.return_type = "void"
self.parameters = []
self.offset = -1
self.initialized = 0
self.images = []
self.assign_offset = 0
self.static_entry_points = []
# Track the parameter string (for the function prototype)
# for each entry-point. This is done because some functions
# change their prototype slightly when promoted from extension
# to ARB extension to core. glTexImage3DEXT and glTexImage3D
# are good examples of this. Scripts that need to generate
# code for these differing aliases need to real prototype
# for each entry-point. Otherwise, they may generate code
# that won't compile.
self.parameter_strings = {}
self.process_element( element )
return
def process_element(self, element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if is_attr_true(element, "static_dispatch"):
self.static_entry_points.append(name)
self.entry_points.append( name )
if alias:
true_name = alias
else:
true_name = name
# Only try to set the offset when a non-alias
# entry-point is being processed.
offset = element.nsProp( "offset", None )
if offset:
try:
o = int( offset )
self.offset = o
except Exception, e:
self.offset = -1
if offset == "assign":
self.assign_offset = 1
if not self.name:
self.name = true_name
elif self.name != true_name:
raise RuntimeError("Function true name redefined. Was %s, now %s." % (self.name, true_name))
# There are two possible cases. The first time an entry-point
# with data is seen, self.initialized will be 0. On that
# pass, we just fill in the data. The next time an
# entry-point with data is seen, self.initialized will be 1.
# On that pass we have to make sure that the new values match the
# values from the previous entry-point.
parameters = []
return_type = "void"
child = element.children
while child:
if child.type == "element":
if child.name == "return":
return_type = child.nsProp( "type", None )
elif child.name == "param":
param = self.context.factory.create_item( "parameter", child, self.context)
parameters.append( param )
child = child.next
if self.initialized:
if self.return_type != return_type:
raise RuntimeError( "Return type changed in %s. Was %s, now %s." % (name, self.return_type, return_type))
if len(parameters) != len(self.parameters):
raise RuntimeError( "Parameter count mismatch in %s. Was %d, now %d." % (name, len(self.parameters), len(parameters)))
for j in range(0, len(parameters)):
p1 = parameters[j]
p2 = self.parameters[j]
if not p1.compatible( p2 ):
raise RuntimeError( 'Parameter type mismatch in %s. "%s" was "%s", now "%s".' % (name, p2.name, p2.type_expr.original_string, p1.type_expr.original_string))
if true_name == name or not self.initialized:
self.return_type = return_type
self.parameters = parameters
for param in self.parameters:
if param.is_image():
self.images.append( param )
if element.children:
self.initialized = 1
self.parameter_strings[name] = create_parameter_string(parameters, 1)
else:
self.parameter_strings[name] = None
return
def get_images(self):
"""Return potentially empty list of input images."""
return self.images
def parameterIterator(self):
return self.parameters.__iter__();
def get_parameter_string(self, entrypoint = None):
if entrypoint:
s = self.parameter_strings[ entrypoint ]
if s:
return s
return create_parameter_string( self.parameters, 1 )
def get_called_parameter_string(self):
p_string = ""
comma = ""
for p in self.parameterIterator():
p_string = p_string + comma + p.name
comma = ", "
return p_string
def is_abi(self):
return (self.offset >= 0 and not self.assign_offset)
def is_static_entry_point(self, name):
return name in self.static_entry_points
def dispatch_name(self):
if self.name in self.static_entry_points:
return self.name
else:
return "_dispatch_stub_%u" % (self.offset)
def static_name(self, name):
if name in self.static_entry_points:
return name
else:
return "_dispatch_stub_%u" % (self.offset)
class gl_item_factory:
"""Factory to create objects derived from gl_item."""
def create_item(self, item_name, element, context):
if item_name == "function":
return gl_function(element, context)
if item_name == "type":
return gl_type(element, context)
elif item_name == "enum":
return gl_enum(element, context)
elif item_name == "parameter":
return gl_parameter(element, context)
elif item_name == "api":
return gl_api(self)
else:
return None
class gl_api:
def __init__(self, factory):
self.functions_by_name = {}
self.enums_by_name = {}
self.types_by_name = {}
self.category_dict = {}
self.categories = [{}, {}, {}, {}]
self.factory = factory
self.next_offset = 0
typeexpr.create_initial_types()
return
def process_element(self, doc):
element = doc.children
while element.type != "element" or element.name != "OpenGLAPI":
element = element.next
if element:
self.process_OpenGLAPI(element)
return
def process_OpenGLAPI(self, element):
child = element.children
while child:
if child.type == "element":
if child.name == "category":
self.process_category( child )
elif child.name == "OpenGLAPI":
self.process_OpenGLAPI( child )
child = child.next
return
def process_category(self, cat):
cat_name = cat.nsProp( "name", None )
cat_number = cat.nsProp( "number", None )
[cat_type, key] = classify_category(cat_name, cat_number)
self.categories[cat_type][key] = [cat_name, cat_number]
child = cat.children
while child:
if child.type == "element":
if child.name == "function":
func_name = real_function_name( child )
temp_name = child.nsProp( "name", None )
self.category_dict[ temp_name ] = [cat_name, cat_number]
if self.functions_by_name.has_key( func_name ):
func = self.functions_by_name[ func_name ]
func.process_element( child )
else:
func = self.factory.create_item( "function", child, self )
self.functions_by_name[ func_name ] = func
if func.offset >= self.next_offset:
self.next_offset = func.offset + 1
elif child.name == "enum":
enum = self.factory.create_item( "enum", child, self )
self.enums_by_name[ enum.name ] = enum
elif child.name == "type":
t = self.factory.create_item( "type", child, self )
self.types_by_name[ "GL" + t.name ] = t
child = child.next
return
def functionIterateByCategory(self, cat = None):
"""Iterate over functions by category.
If cat is None, all known functions are iterated in category
order. See classify_category for details of the ordering.
Within a category, functions are sorted by name. If cat is
not None, then only functions in that category are iterated.
"""
lists = [{}, {}, {}, {}]
for func in self.functionIterateAll():
[cat_name, cat_number] = self.category_dict[func.name]
if (cat == None) or (cat == cat_name):
[func_cat_type, key] = classify_category(cat_name, cat_number)
if not lists[func_cat_type].has_key(key):
lists[func_cat_type][key] = {}
lists[func_cat_type][key][func.name] = func
functions = []
for func_cat_type in range(0,4):
keys = lists[func_cat_type].keys()
keys.sort()
for key in keys:
names = lists[func_cat_type][key].keys()
names.sort()
for name in names:
functions.append(lists[func_cat_type][key][name])
return functions.__iter__()
def functionIterateByOffset(self):
max_offset = -1
for func in self.functions_by_name.itervalues():
if func.offset > max_offset:
max_offset = func.offset
temp = [None for i in range(0, max_offset + 1)]
for func in self.functions_by_name.itervalues():
if func.offset != -1:
temp[ func.offset ] = func
list = []
for i in range(0, max_offset + 1):
if temp[i]:
list.append(temp[i])
return list.__iter__();
def functionIterateAll(self):
return self.functions_by_name.itervalues()
def enumIterateByName(self):
keys = self.enums_by_name.keys()
keys.sort()
list = []
for enum in keys:
list.append( self.enums_by_name[ enum ] )
return list.__iter__()
def categoryIterate(self):
"""Iterate over categories.
Iterate over all known categories in the order specified by
classify_category. Each iterated value is a tuple of the
name and number (which may be None) of the category.
"""
list = []
for cat_type in range(0,4):
keys = self.categories[cat_type].keys()
keys.sort()
for key in keys:
list.append(self.categories[cat_type][key])
return list.__iter__()
def get_category_for_name( self, name ):
if self.category_dict.has_key(name):
return self.category_dict[name]
else:
return ["<unknown category>", None]
def typeIterate(self):
return self.types_by_name.itervalues()
def find_type( self, type_name ):
if type_name in self.types_by_name:
return self.types_by_name[ type_name ].type_expr
else:
print "Unable to find base type matching \"%s\"." % (type_name)
return None
|
bsd-3-clause
|
yrizk/django-blog
|
blogvenv/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
2040
|
8935
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
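# Illustrative state (example keys): after od['a'] = 1 and od['b'] = 2 the
# circular chain is
#   root <-> [root, link_b, 'a'] <-> [link_a, root, 'b'] <-> root
# where each triple is [PREV, NEXT, KEY]; __iter__ walks the NEXT pointers
# starting from root[1].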
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
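# --- Editor's illustrative sketch (not part of the original backport) ---
# A minimal, hedged usage example of the insertion-order behaviour documented
# in the comments above; guarded so it only runs when executed directly.
if __name__ == '__main__':
    od = OrderedDict([('b', 1), ('a', 2), ('c', 3)])
    assert od.keys() == ['b', 'a', 'c']        # insertion order is preserved
    od['a'] = 99                               # updating a key keeps its position
    assert od.keys() == ['b', 'a', 'c']
    assert od.popitem(last=False) == ('b', 1)  # FIFO pop from the front
    assert od.popitem() == ('c', 3)            # LIFO pop from the end (default)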
|
apache-2.0
|
Tuxemon/Tuxemon-Server
|
tuxemon_server/core/game/event/actions/core.py
|
3
|
9876
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <[email protected]>
# Leif Theden <[email protected]>
#
from __future__ import absolute_import
import logging
from core.tools import open_dialog
# Create a logger for optional handling of debug messages.
logger = logging.getLogger(__name__)
class Core(object):
def __init__(self):
# this is a potentially temporary solution to a problem with dialog chains
self._dialog_chain_queue = list()
def _replace_text(self, game, text):
"""Replaces ${{var}} tiled variables with their in-game value.
:param game: The main game object that contains all the game's variables.
:param text: The text to replace.
:type game: core.control.Control
:type text: String
:rtype: String
:returns: Replaced string text with in-game values.
**Examples:**
        >>> self._replace_text(game, "${{name}} is running away!")
'Red is running away!'
"""
text = text.replace("${{name}}", game.player1.name)
text = text.replace(r"\n", "\n")
return text
def set_variable(self, game, action):
"""Sets the key in the player.game_variables dictionary.
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: variable_name:value
**Examples:**
>>> action.__dict__
{
"type": "set_variable",
"parameters": [
"battle_won:yes"
]
}
"""
# Get the player object from the game.
player = game.player1
# Split the variable into a key: value pair
var_list = action.parameters[0].split(":")
var_key = str(var_list[0])
var_value = str(var_list[1])
# Append the game_variables dictionary with the key: value pair
player.game_variables[var_key] = var_value
def dialog(self, game, action):
"""Opens a dialog window with text
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: text_to_display
You may also use special variables in dialog events. Here is a list of available variables:
* ${{name}} - The current player's name.
**Examples:**
>>> action.__dict__
{
"type": "dialog",
"parameters": [
"Red:\\n This is some dialog!"
]
}
"""
text = str(action.parameters[0])
text = self._replace_text(game, text)
logger.info("Opening dialog window")
# Open a dialog window in the current scene.
open_dialog(game, [text])
def dialog_chain(self, game, action):
"""Opens a chain of dialogs in order. Dialog chain must be ended with the ${{end}} keyword.
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: text_to_display
You may also use special variables in dialog events. Here is a list of available variables:
* ${{name}} - The current player's name.
* ${{end}} - Ends the dialog chain.
**Examples:**
>>> action.__dict__
{
"type": "dialog_chain",
"parameters": [
"Red:\\n This is some dialog!"
]
}
"""
text = str(action.parameters[0])
text = self._replace_text(game, text)
logger.info("Opening chain dialog window")
if text == "${{end}}":
# Open a dialog window in the current scene.
open_dialog(game, self._dialog_chain_queue)
self._dialog_chain_queue = list()
else:
self._dialog_chain_queue.append(text)
def rumble(self, game, action):
"""Rumbles available controllers with rumble support
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: duration,power
* duration (float): time in seconds to rumble for
* power (int): percentage of power to rumble. (1-100)
**Examples:**
>>> action.__dict__
{
"type": "rumble",
"parameters": [
"2",
"100"
]
}
"""
duration = float(action.parameters[0])
power = int(action.parameters[1])
min_power = 0
max_power = 24576
if power < 0:
power = 0
elif power > 100:
power = 100
magnitude = int((power * 0.01) * max_power)
game.rumble.rumble(-1, length=duration, magnitude=magnitude)
def wait_for_secs(self, game, action):
"""Pauses the event engine for n number of seconds.
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: duration
* duration (float): time in seconds for the event engine to wait for
**Examples:**
>>> action.__dict__
{
"type": "wait_for_secs",
"parameters": [
"2.0"
]
}
"""
secs = float(action.parameters[0])
game.event_engine.state = "waiting"
game.event_engine.wait = secs
def wait_for_input(self, game, action):
"""Pauses the event engine until specified button is pressed
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: button
* button (str): pygame key to wait for
**Examples:**
>>> action.__dict__
{
"type": "wait_for_input",
"parameters": [
"K_RETURN"
]
}
"""
button = str(action.parameters[0])
game.event_engine.state = "waiting for input"
game.event_engine.wait = 2
game.event_engine.button = button
def change_state(self, game, action):
"""Changes to the specified state.
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: state_name
* state_name (str): The state name to switch to.
**Examples:**
>>> action.__dict__
{
"type": "change_state",
"parameters": [
"MAIN_MENU"
]
}
"""
# Don't override previous state if we are still in the state.
if game.state_name != action.parameters[0]:
game.push_state(action.parameters[0])
def call_event(self, game, action):
"""Executes the specified event's actions by id.
:param game: The main game object that contains all the game's variables.
:param action: The action (tuple) retrieved from the database that contains the action's
parameters
:type game: core.control.Control
:type action: Tuple
:rtype: None
:returns: None
Valid Parameters: event_id
* event_id (int): The tmx id of the event to call.
**Examples:**
>>> action.__dict__
{
"type": "call_event",
"parameters": [
"2"
]
}
"""
event_engine = game.event_engine
events = game.events
for e in events:
if e['id'] == int(action.parameters[0]):
event_engine.execute_action(e['acts'], game)
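# --- Editor's illustrative sketch (not part of the original Tuxemon module) ---
# Demonstrates the ${{name}} substitution documented in Core._replace_text()
# using minimal stand-in objects; the real `game` argument is a
# core.control.Control instance, which is only assumed here.
if __name__ == "__main__":
    class _StubPlayer(object):
        name = "Red"

    class _StubGame(object):
        player1 = _StubPlayer()

    print(Core()._replace_text(_StubGame(), "${{name}} is running away!"))
    # Expected output: Red is running away!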
|
gpl-3.0
|
jrief/django-filer
|
filer/models/clipboardmodels.py
|
22
|
1562
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from filer.models import filemodels
from filer.utils.compatibility import python_2_unicode_compatible
@python_2_unicode_compatible
class Clipboard(models.Model):
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), verbose_name=_('user'), related_name="filer_clipboards")
files = models.ManyToManyField(
'File', verbose_name=_('files'), related_name="in_clipboards",
through='ClipboardItem')
def append_file(self, file_obj):
try:
# We have to check if file is already in the clipboard as otherwise polymorphic complains
self.files.get(pk=file_obj.pk)
return False
except filemodels.File.DoesNotExist:
newitem = ClipboardItem(file=file_obj, clipboard=self)
newitem.save()
return True
def __str__(self):
return "Clipboard %s of %s" % (self.id, self.user)
class Meta:
app_label = 'filer'
verbose_name = _('clipboard')
verbose_name_plural = _('clipboards')
class ClipboardItem(models.Model):
file = models.ForeignKey('File', verbose_name=_('file'))
clipboard = models.ForeignKey(Clipboard, verbose_name=_('clipboard'))
class Meta:
app_label = 'filer'
verbose_name = _('clipboard item')
verbose_name_plural = _('clipboard items')
|
bsd-3-clause
|
dongjoon-hyun/tensorflow
|
tensorflow/contrib/timeseries/python/timeseries/ar_model_test.py
|
25
|
15933
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ar_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import test_utils
from tensorflow.contrib.timeseries.python.timeseries.estimators import ARRegressor
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training
class ARModelTest(test.TestCase):
def create_data(self,
noise_stddev,
anomaly_prob,
multiple_periods=False,
anomaly_stddev_scale=20):
self.period = 25
num_samples = 200
time = 1 + 3 * np.arange(num_samples).astype(np.int64)
time_offset = (2 * np.pi * (time % self.period).astype(np.float) /
self.period).reshape([-1, 1])
if multiple_periods:
period2 = 55
self.period = [self.period, period2]
time_offset2 = ((time % period2).astype(np.float) / period2).reshape(
[-1, 1])
data1 = np.sin(time_offset / 2.0) ** 2 * (1 + time_offset2)
else:
data1 = np.sin(2 * time_offset) + np.cos(3 * time_offset)
data1 += noise_stddev / 4. * np.random.randn(num_samples, 1)
data2 = (np.sin(3 * time_offset) + np.cos(5 * time_offset) +
noise_stddev / 3. * np.random.randn(num_samples, 1))
# Add some anomalies to data1
if anomaly_prob > 0.:
num_anomalies = int(anomaly_prob * num_samples)
anomaly_values = (anomaly_stddev_scale * noise_stddev / 4 *
np.random.randn(num_anomalies))
indices = np.random.randint(0, num_samples, num_anomalies)
for index, val in zip(indices, anomaly_values):
data1[index] += val
data = np.concatenate((4 * data1, 3 * data2), axis=1)
split = int(num_samples * 0.8)
train_data = {TrainEvalFeatures.TIMES: time[0:split],
TrainEvalFeatures.VALUES: data[0:split]}
test_data = {TrainEvalFeatures.TIMES: time[split:],
TrainEvalFeatures.VALUES: data[split:]}
return (train_data, test_data)
# Note that most models will require many more steps to fully converge. We
# have used a small number of steps here to keep the running time small.
def train_helper(self, input_window_size, loss,
max_loss=None, train_steps=200,
anomaly_prob=0.01,
anomaly_distribution=None,
multiple_periods=False):
np.random.seed(3)
data_noise_stddev = 0.2
if max_loss is None:
if loss == ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
max_loss = 1.0
else:
max_loss = 0.05 / (data_noise_stddev ** 2)
train_data, test_data = self.create_data(
noise_stddev=data_noise_stddev,
anomaly_prob=anomaly_prob,
multiple_periods=multiple_periods)
output_window_size = 10
window_size = input_window_size + output_window_size
class _RunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
estimator = ARRegressor(
periodicities=self.period,
anomaly_prior_probability=0.01 if anomaly_distribution else None,
anomaly_distribution=anomaly_distribution,
num_features=2,
output_window_size=output_window_size,
num_time_buckets=20,
input_window_size=input_window_size,
hidden_layer_sizes=[16],
loss=loss,
config=_RunConfig())
train_input_fn = input_pipeline.RandomWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(train_data),
window_size=window_size,
batch_size=64,
num_threads=1,
shuffle_seed=2)
test_input_fn = test_utils.AllWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(test_data),
window_size=window_size)
# Test training
estimator.train(
input_fn=train_input_fn,
steps=train_steps)
test_evaluation = estimator.evaluate(input_fn=test_input_fn, steps=1)
test_loss = test_evaluation["loss"]
logging.info("Final test loss: %f", test_loss)
self.assertLess(test_loss, max_loss)
if loss == ar_model.ARModel.SQUARED_LOSS:
# Test that the evaluation loss is reported without input scaling.
self.assertAllClose(
test_loss,
np.mean((test_evaluation["mean"] - test_evaluation["observed"]) ** 2))
# Test predict
train_data_times = train_data[TrainEvalFeatures.TIMES]
train_data_values = train_data[TrainEvalFeatures.VALUES]
test_data_times = test_data[TrainEvalFeatures.TIMES]
test_data_values = test_data[TrainEvalFeatures.VALUES]
predict_times = np.expand_dims(np.concatenate(
[train_data_times[input_window_size:], test_data_times]), 0)
predict_true_values = np.expand_dims(np.concatenate(
[train_data_values[input_window_size:], test_data_values]), 0)
state_times = np.expand_dims(train_data_times[:input_window_size], 0)
state_values = np.expand_dims(
train_data_values[:input_window_size, :], 0)
state_exogenous = state_times[:, :, None][:, :, :0]
def prediction_input_fn():
return ({
PredictionFeatures.TIMES: training.limit_epochs(
predict_times, num_epochs=1),
PredictionFeatures.STATE_TUPLE: (state_times,
state_values,
state_exogenous)
}, {})
(predictions,) = tuple(estimator.predict(input_fn=prediction_input_fn))
predicted_mean = predictions["mean"][:, 0]
true_values = predict_true_values[0, :, 0]
if loss == ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
variances = predictions["covariance"][:, 0]
standard_deviations = np.sqrt(variances)
# Note that we may get tighter bounds with more training steps.
errors = np.abs(predicted_mean - true_values) > 4 * standard_deviations
fraction_errors = np.mean(errors)
logging.info("Fraction errors: %f", fraction_errors)
def test_time_regression_squared(self):
self.train_helper(input_window_size=0,
train_steps=350,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_squared(self):
self.train_helper(input_window_size=15,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_short_input_window(self):
self.train_helper(input_window_size=8,
loss=ar_model.ARModel.SQUARED_LOSS)
def test_autoregression_normal(self):
self.train_helper(input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
train_steps=300,
max_loss=50., # Just make sure there are no exceptions.
anomaly_distribution=None)
def test_autoregression_normal_multiple_periods(self):
self.train_helper(input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
max_loss=2.0,
multiple_periods=True,
anomaly_distribution=None)
def test_autoregression_normal_anomalies_normal(self):
self.train_helper(
input_window_size=10,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
anomaly_distribution=ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY)
def test_autoregression_normal_anomalies_cauchy(self):
self.train_helper(
input_window_size=10,
max_loss=1.5,
loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS,
anomaly_distribution=ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY)
def test_wrong_window_size(self):
estimator = ARRegressor(
periodicities=10, num_features=1,
input_window_size=10, output_window_size=6)
def _bad_window_size_input_fn():
return ({TrainEvalFeatures.TIMES: [[1]],
TrainEvalFeatures.VALUES: [[[1.]]]},
None)
def _good_data():
return ({TrainEvalFeatures.TIMES: np.arange(16)[None, :],
TrainEvalFeatures.VALUES: array_ops.reshape(
np.arange(16), [1, 16, 1])},
None)
with self.assertRaisesRegexp(ValueError, "set window_size=16"):
estimator.train(input_fn=_bad_window_size_input_fn, steps=1)
# Get a checkpoint for evaluation
estimator.train(input_fn=_good_data, steps=1)
with self.assertRaisesRegexp(ValueError, "requires a window of at least"):
estimator.evaluate(input_fn=_bad_window_size_input_fn, steps=1)
def test_predictions_direct_flat(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2,
prediction_model_factory=functools.partial(
ar_model.FlatPredictionModel,
hidden_layer_sizes=[40, 10]))
with session.Session():
predicted_values = model.predict({
PredictionFeatures.TIMES: [[4, 6, 10]],
PredictionFeatures.STATE_TUPLE: (
[[1, 2]], [[[1.], [2.]]], [[[], []]])
})
variables.global_variables_initializer().run()
self.assertAllEqual(predicted_values["mean"].eval().shape,
[1, 3, 1])
def test_predictions_direct_lstm(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=16))
with session.Session():
predicted_values = model.predict({
PredictionFeatures.TIMES: [[4, 6, 10]],
PredictionFeatures.STATE_TUPLE: (
[[1, 2]], [[[1.], [2.]]], [[[], []]])
})
variables.global_variables_initializer().run()
self.assertAllEqual(predicted_values["mean"].eval().shape,
[1, 3, 1])
def test_long_eval(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=1)
raw_features = {
TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
chunked_features, _ = test_utils.AllWindowInputFn(
time_series_reader=input_pipeline.NumpyReader(raw_features),
window_size=3)()
model.initialize_graph()
with variable_scope.variable_scope("armodel") as scope:
raw_evaluation = model.define_loss(
raw_features, mode=estimator_lib.ModeKeys.EVAL)
with variable_scope.variable_scope(scope, reuse=True):
chunked_evaluation = model.define_loss(
chunked_features, mode=estimator_lib.ModeKeys.EVAL)
with session.Session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.global_variables_initializer().run()
raw_evaluation_evaled, chunked_evaluation_evaled = sess.run(
[raw_evaluation, chunked_evaluation])
self.assertAllClose(chunked_evaluation_evaled.loss,
raw_evaluation_evaled.loss)
last_chunk_evaluation_state = [
state[-1, None] for state in
chunked_evaluation_evaled.end_state]
for last_chunk_state_member, raw_state_member in zip(
last_chunk_evaluation_state, raw_evaluation_evaled.end_state):
self.assertAllClose(last_chunk_state_member, raw_state_member)
self.assertAllEqual([[5, 7, 11]],
raw_evaluation_evaled.prediction_times)
for feature_name in raw_evaluation.predictions:
self.assertAllEqual(
[1, 3, 1], # batch, window, num_features. The window size has 2
# cut off for the first input_window.
raw_evaluation_evaled.predictions[feature_name].shape)
self.assertAllClose(
np.reshape(chunked_evaluation_evaled.predictions[feature_name],
[-1]),
np.reshape(raw_evaluation_evaled.predictions[feature_name],
[-1]))
coordinator.request_stop()
coordinator.join()
def test_long_eval_discard_indivisible(self):
g = ops.Graph()
with g.as_default():
model = ar_model.ARModel(periodicities=2,
num_features=1,
num_time_buckets=10,
input_window_size=2,
output_window_size=2)
raw_features = {
TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
model.initialize_graph()
raw_evaluation = model.define_loss(
raw_features, mode=estimator_lib.ModeKeys.EVAL)
with session.Session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.global_variables_initializer().run()
raw_evaluation_evaled = sess.run(raw_evaluation)
self.assertAllEqual([[7, 11]],
raw_evaluation_evaled.prediction_times)
for feature_name in raw_evaluation.predictions:
self.assertAllEqual(
[1, 2, 1], # batch, window, num_features. The window has two cut
# off for the first input window and one discarded so
# that the remainder is divisible into output windows.
raw_evaluation_evaled.predictions[feature_name].shape)
coordinator.request_stop()
coordinator.join()
if __name__ == "__main__":
test.main()
|
apache-2.0
|
AnonymFx/scripts
|
ios-to-android-strings.py
|
5
|
2531
|
#!/usr/bin/python
import argparse
import os
import re
def main():
# def arguments
argparser = argparse.ArgumentParser()
argparser.add_argument("input")
argparser.add_argument("target")
args = argparser.parse_args()
# get input file
if not os.path.isfile(args.input):
raise Exception('File not found: ' + args.input)
input_file = open(args.input)
if os.path.isfile(args.target):
os.remove(args.target)
if not os.path.exists(os.path.dirname(args.target)):
os.makedirs(os.path.dirname(args.target))
output_file = open(args.target, 'w+')
input_string = read_input(input_file)
input_file.close()
mappings = parse_input(input_string)
write_output(mappings, output_file)
output_file.close()
def read_input(file):
# read file to string
file_content = file.read()
file_content = re.sub("(/\\*([^*]|[\\r\\n]|(\\*+([^*/]|[\\r\\n])))*\\*+/)|(//.*)", "", file_content)
return file_content
def parse_input(input):
def_strings = re.findall("\"[^\"]*\"\\s*=\\s*\"[^\"]*\";", input)
mappings = []
for string_def in def_strings:
# remove semicolon
string_def = string_def[:-1]
# split at equals
split = string_def.split('=')
if len(split) != 2:
            raise Exception('Invalid format in the input file at: ' + string_def)
key = split[0].strip()
value = split[1].strip()
# remove quotes
key = key[1:-1]
value = value[1:-1]
if "%@" in value:
value = value.replace('%@', '%s')
# handle html values
if len(value) > 0 and value.strip()[0] == '<':
value = '<![CDATA[' + value + ']]>'
# add to list
mappings.append((key, value))
return mappings
def write_output(mappings, output_file):
# write opening resources
output_file.write('<resources\n'
'\txmlns:tools="http://schemas.android.com/tools"\n'
'\ttools:ignore="MissingTranslation">\n')
# write key value pairs
i = 1
for (key, value) in mappings:
key_value_string = '\t<string name=\"{0}\">{1}</string>\n'
if "%" in value:
key_value_string = '\t<string formatted=\"false\" name=\"{0}\">{1}</string>\n'
key_value_string = key_value_string.format(key, value)
output_file.write(key_value_string)
# write closing resources
output_file.write('</resources>')
pass
if __name__ == "__main__":
main()
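# --- Editor's note (illustrative; the file names below are assumptions) ---
# Example invocation and the transformation this script performs:
#
#   python ios-to-android-strings.py Localizable.strings res/values/strings.xml
#
# Input line (iOS .strings):  "greeting" = "Hello %@!";
# Output line (Android XML):  <string formatted="false" name="greeting">Hello %s!</string>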
|
gpl-3.0
|
eamuntz/Django-Tut
|
env/lib/python2.7/site-packages/django/core/mail/backends/base.py
|
660
|
1164
|
"""Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError
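# --- Editor's illustrative sketch (not part of Django) ---
# A minimal subclass showing the contract described in the docstrings above:
# implement send_messages() and return the number of messages handled. The
# class name and its in-memory "outbox" attribute are assumptions made purely
# for illustration.
class _InMemoryEmailBackend(BaseEmailBackend):
    """Hypothetical backend that simply collects messages in a list."""
    def __init__(self, fail_silently=False, **kwargs):
        super(_InMemoryEmailBackend, self).__init__(fail_silently=fail_silently, **kwargs)
        self.outbox = []

    def send_messages(self, email_messages):
        if not email_messages:
            return 0
        self.outbox.extend(email_messages)
        return len(email_messages)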
|
mit
|
justasabc/kubernetes-ubuntu
|
ke/images/python/setting.py
|
1
|
1316
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# base/apache/var/www/region_load/
# region related
INTERNAL_ADDRESS = "0.0.0.0"
EXTERNAL_HOSTNAME = "162.105.17.48"
ALLOW_ALTERNATE_PORTS = False
MAX_AGENTS = 100
MAX_PRIMS = 15000
GLOBAL_REGION_DATA2 = {
"huyu": {"orig":(1000,1000), "startPort":9000, "wh":(2,2)},
"xwd": {"orig":(1005,1000), "startPort":9050, "wh":(2,2)},
"newhuyu": {"orig":(1010,1000), "startPort":9100, "wh":(2,2)},
"newxwd": {"orig":(1015,1000), "startPort":9150, "wh":(2,2)},
"newregion": {"orig":(1020,1000), "startPort":9200, "wh":(2,2)}
}
CLUSTER_DATA_DIR = "./cluster_data/"
# minions related
MINIONS=["minion1","minion2","minion3"]
# kubernetes related
DOCKER_REGISTRY='docker-registry:5000'
APACHE_NAME='apache'
MYSQL_NAME='mysql'
ROBUST_NAME='robust'
APACHE_IMAGE="docker-registry:5000/ubuntu:apache"
MYSQL_IMAGE="docker-registry:5000/ubuntu:mysql"
ROBUST_IMAGE="docker-registry:5000/ubuntu:robust"
OPENSIM_NAME='opensim'
OPENSIM_IMAGE="docker-registry:5000/ubuntu:opensim"
OPENSIM_COMMAND = ["/bin/bash", "/home/opensim80/bin/ke/start_opensim_xxx.sh"]
CPU = 1024
MEMORY = 1000000000
APACHE_COMMAND = ["/bin/bash", "/home/start_apache.sh"]
MYSQL_COMMAND = ["/bin/bash", "/home/start_mysql.sh"]
ROBUST_COMMAND = ["/bin/bash", "/home/opensim80/bin/ke/start_robust.sh"]
# cpu/memory
|
apache-2.0
|
BurntSushi/docopt
|
test_docopt.py
|
13
|
25889
|
from __future__ import with_statement
from docopt import (docopt, DocoptExit, DocoptLanguageError,
Option, Argument, Command, OptionsShortcut,
Required, Optional, Either, OneOrMore,
parse_argv, parse_pattern, parse_section,
parse_defaults, formal_usage, Tokens, transform
)
from pytest import raises
def test_pattern_flat():
assert Required(OneOrMore(Argument('N')),
Option('-a'), Argument('M')).flat() == \
[Argument('N'), Option('-a'), Argument('M')]
assert Required(Optional(OptionsShortcut()),
Optional(Option('-a', None))).flat(OptionsShortcut) == \
[OptionsShortcut()]
def test_option():
assert Option.parse('-h') == Option('-h', None)
assert Option.parse('--help') == Option(None, '--help')
assert Option.parse('-h --help') == Option('-h', '--help')
assert Option.parse('-h, --help') == Option('-h', '--help')
assert Option.parse('-h TOPIC') == Option('-h', None, 1)
assert Option.parse('--help TOPIC') == Option(None, '--help', 1)
assert Option.parse('-h TOPIC --help TOPIC') == Option('-h', '--help', 1)
assert Option.parse('-h TOPIC, --help TOPIC') == Option('-h', '--help', 1)
assert Option.parse('-h TOPIC, --help=TOPIC') == Option('-h', '--help', 1)
assert Option.parse('-h Description...') == Option('-h', None)
assert Option.parse('-h --help Description...') == Option('-h', '--help')
assert Option.parse('-h TOPIC Description...') == Option('-h', None, 1)
assert Option.parse(' -h') == Option('-h', None)
assert Option.parse('-h TOPIC Descripton... [default: 2]') == \
Option('-h', None, 1, '2')
assert Option.parse('-h TOPIC Descripton... [default: topic-1]') == \
Option('-h', None, 1, 'topic-1')
assert Option.parse('--help=TOPIC ... [default: 3.14]') == \
Option(None, '--help', 1, '3.14')
assert Option.parse('-h, --help=DIR ... [default: ./]') == \
Option('-h', '--help', 1, "./")
assert Option.parse('-h TOPIC Descripton... [dEfAuLt: 2]') == \
Option('-h', None, 1, '2')
def test_option_name():
assert Option('-h', None).name == '-h'
assert Option('-h', '--help').name == '--help'
assert Option(None, '--help').name == '--help'
def test_commands():
assert docopt('Usage: prog add', 'add') == {'add': True}
assert docopt('Usage: prog [add]', '') == {'add': False}
assert docopt('Usage: prog [add]', 'add') == {'add': True}
assert docopt('Usage: prog (add|rm)', 'add') == {'add': True, 'rm': False}
assert docopt('Usage: prog (add|rm)', 'rm') == {'add': False, 'rm': True}
assert docopt('Usage: prog a b', 'a b') == {'a': True, 'b': True}
with raises(DocoptExit):
docopt('Usage: prog a b', 'b a')
def test_formal_usage():
doc = """
Usage: prog [-hv] ARG
prog N M
prog is a program."""
usage, = parse_section('usage:', doc)
assert usage == "Usage: prog [-hv] ARG\n prog N M"
assert formal_usage(usage) == "( [-hv] ARG ) | ( N M )"
def test_parse_argv():
o = [Option('-h'), Option('-v', '--verbose'), Option('-f', '--file', 1)]
TS = lambda s: Tokens(s, error=DocoptExit)
assert parse_argv(TS(''), options=o) == []
assert parse_argv(TS('-h'), options=o) == [Option('-h', None, 0, True)]
assert parse_argv(TS('-h --verbose'), options=o) == \
[Option('-h', None, 0, True), Option('-v', '--verbose', 0, True)]
assert parse_argv(TS('-h --file f.txt'), options=o) == \
[Option('-h', None, 0, True), Option('-f', '--file', 1, 'f.txt')]
assert parse_argv(TS('-h --file f.txt arg'), options=o) == \
[Option('-h', None, 0, True),
Option('-f', '--file', 1, 'f.txt'),
Argument(None, 'arg')]
assert parse_argv(TS('-h --file f.txt arg arg2'), options=o) == \
[Option('-h', None, 0, True),
Option('-f', '--file', 1, 'f.txt'),
Argument(None, 'arg'),
Argument(None, 'arg2')]
assert parse_argv(TS('-h arg -- -v'), options=o) == \
[Option('-h', None, 0, True),
Argument(None, 'arg'),
Argument(None, '--'),
Argument(None, '-v')]
def test_parse_pattern():
o = [Option('-h'), Option('-v', '--verbose'), Option('-f', '--file', 1)]
assert parse_pattern('[ -h ]', options=o) == \
Required(Optional(Option('-h')))
assert parse_pattern('[ ARG ... ]', options=o) == \
Required(Optional(OneOrMore(Argument('ARG'))))
assert parse_pattern('[ -h | -v ]', options=o) == \
Required(Optional(Either(Option('-h'),
Option('-v', '--verbose'))))
assert parse_pattern('( -h | -v [ --file <f> ] )', options=o) == \
Required(Required(
Either(Option('-h'),
Required(Option('-v', '--verbose'),
Optional(Option('-f', '--file', 1, None))))))
assert parse_pattern('(-h|-v[--file=<f>]N...)', options=o) == \
Required(Required(Either(Option('-h'),
Required(Option('-v', '--verbose'),
Optional(Option('-f', '--file', 1, None)),
OneOrMore(Argument('N'))))))
assert parse_pattern('(N [M | (K | L)] | O P)', options=[]) == \
Required(Required(Either(
Required(Argument('N'),
Optional(Either(Argument('M'),
Required(Either(Argument('K'),
Argument('L')))))),
Required(Argument('O'), Argument('P')))))
assert parse_pattern('[ -h ] [N]', options=o) == \
Required(Optional(Option('-h')),
Optional(Argument('N')))
assert parse_pattern('[options]', options=o) == \
Required(Optional(OptionsShortcut()))
assert parse_pattern('[options] A', options=o) == \
Required(Optional(OptionsShortcut()),
Argument('A'))
assert parse_pattern('-v [options]', options=o) == \
Required(Option('-v', '--verbose'),
Optional(OptionsShortcut()))
assert parse_pattern('ADD', options=o) == Required(Argument('ADD'))
assert parse_pattern('<add>', options=o) == Required(Argument('<add>'))
assert parse_pattern('add', options=o) == Required(Command('add'))
def test_option_match():
assert Option('-a').match([Option('-a', value=True)]) == \
(True, [], [Option('-a', value=True)])
assert Option('-a').match([Option('-x')]) == (False, [Option('-x')], [])
assert Option('-a').match([Argument('N')]) == (False, [Argument('N')], [])
assert Option('-a').match([Option('-x'), Option('-a'), Argument('N')]) == \
(True, [Option('-x'), Argument('N')], [Option('-a')])
assert Option('-a').match([Option('-a', value=True), Option('-a')]) == \
(True, [Option('-a')], [Option('-a', value=True)])
def test_argument_match():
assert Argument('N').match([Argument(None, 9)]) == \
(True, [], [Argument('N', 9)])
assert Argument('N').match([Option('-x')]) == (False, [Option('-x')], [])
assert Argument('N').match([Option('-x'),
Option('-a'),
Argument(None, 5)]) == \
(True, [Option('-x'), Option('-a')], [Argument('N', 5)])
assert Argument('N').match([Argument(None, 9), Argument(None, 0)]) == \
(True, [Argument(None, 0)], [Argument('N', 9)])
def test_command_match():
assert Command('c').match([Argument(None, 'c')]) == \
(True, [], [Command('c', True)])
assert Command('c').match([Option('-x')]) == (False, [Option('-x')], [])
assert Command('c').match([Option('-x'),
Option('-a'),
Argument(None, 'c')]) == \
(True, [Option('-x'), Option('-a')], [Command('c', True)])
assert Either(Command('add', False), Command('rm', False)).match(
[Argument(None, 'rm')]) == (True, [], [Command('rm', True)])
def test_optional_match():
assert Optional(Option('-a')).match([Option('-a')]) == \
(True, [], [Option('-a')])
assert Optional(Option('-a')).match([]) == (True, [], [])
assert Optional(Option('-a')).match([Option('-x')]) == \
(True, [Option('-x')], [])
assert Optional(Option('-a'), Option('-b')).match([Option('-a')]) == \
(True, [], [Option('-a')])
assert Optional(Option('-a'), Option('-b')).match([Option('-b')]) == \
(True, [], [Option('-b')])
assert Optional(Option('-a'), Option('-b')).match([Option('-x')]) == \
(True, [Option('-x')], [])
assert Optional(Argument('N')).match([Argument(None, 9)]) == \
(True, [], [Argument('N', 9)])
assert Optional(Option('-a'), Option('-b')).match(
[Option('-b'), Option('-x'), Option('-a')]) == \
(True, [Option('-x')], [Option('-a'), Option('-b')])
def test_required_match():
assert Required(Option('-a')).match([Option('-a')]) == \
(True, [], [Option('-a')])
assert Required(Option('-a')).match([]) == (False, [], [])
assert Required(Option('-a')).match([Option('-x')]) == \
(False, [Option('-x')], [])
assert Required(Option('-a'), Option('-b')).match([Option('-a')]) == \
(False, [Option('-a')], [])
def test_either_match():
assert Either(Option('-a'), Option('-b')).match(
[Option('-a')]) == (True, [], [Option('-a')])
assert Either(Option('-a'), Option('-b')).match(
[Option('-a'), Option('-b')]) == \
(True, [Option('-b')], [Option('-a')])
assert Either(Option('-a'), Option('-b')).match(
[Option('-x')]) == (False, [Option('-x')], [])
assert Either(Option('-a'), Option('-b'), Option('-c')).match(
[Option('-x'), Option('-b')]) == \
(True, [Option('-x')], [Option('-b')])
assert Either(Argument('M'),
Required(Argument('N'), Argument('M'))).match(
[Argument(None, 1), Argument(None, 2)]) == \
(True, [], [Argument('N', 1), Argument('M', 2)])
def test_one_or_more_match():
assert OneOrMore(Argument('N')).match([Argument(None, 9)]) == \
(True, [], [Argument('N', 9)])
assert OneOrMore(Argument('N')).match([]) == (False, [], [])
assert OneOrMore(Argument('N')).match([Option('-x')]) == \
(False, [Option('-x')], [])
assert OneOrMore(Argument('N')).match(
[Argument(None, 9), Argument(None, 8)]) == (
True, [], [Argument('N', 9), Argument('N', 8)])
assert OneOrMore(Argument('N')).match(
[Argument(None, 9), Option('-x'), Argument(None, 8)]) == (
True, [Option('-x')], [Argument('N', 9), Argument('N', 8)])
assert OneOrMore(Option('-a')).match(
[Option('-a'), Argument(None, 8), Option('-a')]) == \
(True, [Argument(None, 8)], [Option('-a'), Option('-a')])
assert OneOrMore(Option('-a')).match([Argument(None, 8),
Option('-x')]) == \
(False, [Argument(None, 8), Option('-x')], [])
assert OneOrMore(Required(Option('-a'), Argument('N'))).match(
[Option('-a'), Argument(None, 1), Option('-x'),
Option('-a'), Argument(None, 2)]) == \
(True, [Option('-x')],
[Option('-a'), Argument('N', 1), Option('-a'), Argument('N', 2)])
assert OneOrMore(Optional(Argument('N'))).match([Argument(None, 9)]) == \
(True, [], [Argument('N', 9)])
def test_list_argument_match():
assert Required(Argument('N'), Argument('N')).fix().match(
[Argument(None, '1'), Argument(None, '2')]) == \
(True, [], [Argument('N', ['1', '2'])])
assert OneOrMore(Argument('N')).fix().match(
[Argument(None, '1'), Argument(None, '2'), Argument(None, '3')]) == \
(True, [], [Argument('N', ['1', '2', '3'])])
assert Required(Argument('N'), OneOrMore(Argument('N'))).fix().match(
[Argument(None, '1'), Argument(None, '2'), Argument(None, '3')]) == \
(True, [], [Argument('N', ['1', '2', '3'])])
assert Required(Argument('N'), Required(Argument('N'))).fix().match(
[Argument(None, '1'), Argument(None, '2')]) == \
(True, [], [Argument('N', ['1', '2'])])
def test_basic_pattern_matching():
# ( -a N [ -x Z ] )
pattern = Required(Option('-a'), Argument('N'),
Optional(Option('-x'), Argument('Z')))
# -a N
assert pattern.match([Option('-a'), Argument(None, 9)]) == \
(True, [], [Option('-a'), Argument('N', 9)])
# -a -x N Z
assert pattern.match([Option('-a'), Option('-x'),
Argument(None, 9), Argument(None, 5)]) == \
(True, [], [Option('-a'), Argument('N', 9),
Option('-x'), Argument('Z', 5)])
# -x N Z # BZZ!
assert pattern.match([Option('-x'),
Argument(None, 9),
Argument(None, 5)]) == \
(False, [Option('-x'), Argument(None, 9), Argument(None, 5)], [])
def test_pattern_either():
assert transform(Option('-a')) == Either(Required(Option('-a')))
assert transform(Argument('A')) == Either(Required(Argument('A')))
assert transform(Required(Either(Option('-a'), Option('-b')),
Option('-c'))) == \
Either(Required(Option('-a'), Option('-c')),
Required(Option('-b'), Option('-c')))
assert transform(Optional(Option('-a'), Either(Option('-b'),
Option('-c')))) == \
Either(Required(Option('-b'), Option('-a')),
Required(Option('-c'), Option('-a')))
assert transform(Either(Option('-x'),
Either(Option('-y'), Option('-z')))) == \
Either(Required(Option('-x')),
Required(Option('-y')),
Required(Option('-z')))
assert transform(OneOrMore(Argument('N'), Argument('M'))) == \
Either(Required(Argument('N'), Argument('M'),
Argument('N'), Argument('M')))
def test_pattern_fix_repeating_arguments():
assert Option('-a').fix_repeating_arguments() == Option('-a')
assert Argument('N', None).fix_repeating_arguments() == Argument('N', None)
assert Required(Argument('N'),
Argument('N')).fix_repeating_arguments() == \
Required(Argument('N', []), Argument('N', []))
assert Either(Argument('N'),
OneOrMore(Argument('N'))).fix() == \
Either(Argument('N', []), OneOrMore(Argument('N', [])))
def test_set():
assert Argument('N') == Argument('N')
assert set([Argument('N'), Argument('N')]) == set([Argument('N')])
def test_pattern_fix_identities_1():
pattern = Required(Argument('N'), Argument('N'))
assert pattern.children[0] == pattern.children[1]
assert pattern.children[0] is not pattern.children[1]
pattern.fix_identities()
assert pattern.children[0] is pattern.children[1]
def test_pattern_fix_identities_2():
pattern = Required(Optional(Argument('X'), Argument('N')), Argument('N'))
assert pattern.children[0].children[1] == pattern.children[1]
assert pattern.children[0].children[1] is not pattern.children[1]
pattern.fix_identities()
assert pattern.children[0].children[1] is pattern.children[1]
def test_long_options_error_handling():
# with raises(DocoptLanguageError):
# docopt('Usage: prog --non-existent', '--non-existent')
# with raises(DocoptLanguageError):
# docopt('Usage: prog --non-existent')
with raises(DocoptExit):
docopt('Usage: prog', '--non-existent')
with raises(DocoptExit):
docopt('Usage: prog [--version --verbose]\n'
'Options: --version\n --verbose', '--ver')
with raises(DocoptLanguageError):
docopt('Usage: prog --long\nOptions: --long ARG')
with raises(DocoptExit):
docopt('Usage: prog --long ARG\nOptions: --long ARG', '--long')
with raises(DocoptLanguageError):
docopt('Usage: prog --long=ARG\nOptions: --long')
with raises(DocoptExit):
docopt('Usage: prog --long\nOptions: --long', '--long=ARG')
def test_short_options_error_handling():
with raises(DocoptLanguageError):
docopt('Usage: prog -x\nOptions: -x this\n -x that')
# with raises(DocoptLanguageError):
# docopt('Usage: prog -x')
with raises(DocoptExit):
docopt('Usage: prog', '-x')
with raises(DocoptLanguageError):
docopt('Usage: prog -o\nOptions: -o ARG')
with raises(DocoptExit):
docopt('Usage: prog -o ARG\nOptions: -o ARG', '-o')
def test_matching_paren():
with raises(DocoptLanguageError):
docopt('Usage: prog [a [b]')
with raises(DocoptLanguageError):
docopt('Usage: prog [a [b] ] c )')
def test_allow_double_dash():
    assert docopt('usage: prog [-o] [--] <arg>\noptions: -o',
                  '-- -o') == {'-o': False, '<arg>': '-o', '--': True}
    assert docopt('usage: prog [-o] [--] <arg>\noptions: -o',
                  '-o 1') == {'-o': True, '<arg>': '1', '--': False}
with raises(DocoptExit): # "--" is not allowed; FIXME?
docopt('usage: prog [-o] <arg>\noptions:-o', '-- -o')
def test_docopt():
doc = '''Usage: prog [-v] A
Options: -v Be verbose.'''
assert docopt(doc, 'arg') == {'-v': False, 'A': 'arg'}
assert docopt(doc, '-v arg') == {'-v': True, 'A': 'arg'}
doc = """Usage: prog [-vqr] [FILE]
prog INPUT OUTPUT
prog --help
Options:
-v print status messages
-q report only file names
-r show all occurrences of the same error
--help
"""
a = docopt(doc, '-v file.py')
assert a == {'-v': True, '-q': False, '-r': False, '--help': False,
'FILE': 'file.py', 'INPUT': None, 'OUTPUT': None}
a = docopt(doc, '-v')
assert a == {'-v': True, '-q': False, '-r': False, '--help': False,
'FILE': None, 'INPUT': None, 'OUTPUT': None}
with raises(DocoptExit): # does not match
docopt(doc, '-v input.py output.py')
with raises(DocoptExit):
docopt(doc, '--fake')
with raises(SystemExit):
docopt(doc, '--hel')
#with raises(SystemExit):
# docopt(doc, 'help') XXX Maybe help command?
def test_language_errors():
with raises(DocoptLanguageError):
docopt('no usage with colon here')
with raises(DocoptLanguageError):
docopt('usage: here \n\n and again usage: here')
def test_issue_40():
with raises(SystemExit): # i.e. shows help
docopt('usage: prog --help-commands | --help', '--help')
assert docopt('usage: prog --aabb | --aa', '--aa') == {'--aabb': False,
'--aa': True}
def test_issue34_unicode_strings():
try:
assert docopt(eval("u'usage: prog [-o <a>]'"), '') == \
{'-o': False, '<a>': None}
except SyntaxError:
pass # Python 3
def test_count_multiple_flags():
assert docopt('usage: prog [-v]', '-v') == {'-v': True}
assert docopt('usage: prog [-vv]', '') == {'-v': 0}
assert docopt('usage: prog [-vv]', '-v') == {'-v': 1}
assert docopt('usage: prog [-vv]', '-vv') == {'-v': 2}
with raises(DocoptExit):
docopt('usage: prog [-vv]', '-vvv')
assert docopt('usage: prog [-v | -vv | -vvv]', '-vvv') == {'-v': 3}
assert docopt('usage: prog -v...', '-vvvvvv') == {'-v': 6}
assert docopt('usage: prog [--ver --ver]', '--ver --ver') == {'--ver': 2}
def test_any_options_parameter():
with raises(DocoptExit):
docopt('usage: prog [options]', '-foo --bar --spam=eggs')
# assert docopt('usage: prog [options]', '-foo --bar --spam=eggs',
# any_options=True) == {'-f': True, '-o': 2,
# '--bar': True, '--spam': 'eggs'}
with raises(DocoptExit):
docopt('usage: prog [options]', '--foo --bar --bar')
# assert docopt('usage: prog [options]', '--foo --bar --bar',
# any_options=True) == {'--foo': True, '--bar': 2}
with raises(DocoptExit):
docopt('usage: prog [options]', '--bar --bar --bar -ffff')
# assert docopt('usage: prog [options]', '--bar --bar --bar -ffff',
# any_options=True) == {'--bar': 3, '-f': 4}
with raises(DocoptExit):
docopt('usage: prog [options]', '--long=arg --long=another')
# assert docopt('usage: prog [options]', '--long=arg --long=another',
# any_options=True) == {'--long': ['arg', 'another']}
#def test_options_shortcut_multiple_commands():
# # any_options is disabled
# assert docopt('usage: prog c1 [options] prog c2 [options]',
# 'c2 -o', any_options=True) == {'-o': True, 'c1': False, 'c2': True}
# assert docopt('usage: prog c1 [options] prog c2 [options]',
# 'c1 -o', any_options=True) == {'-o': True, 'c1': True, 'c2': False}
def test_default_value_for_positional_arguments():
doc = """Usage: prog [--data=<data>...]\n
Options:\n\t-d --data=<arg> Input data [default: x]
"""
a = docopt(doc, '')
assert a == {'--data': ['x']}
doc = """Usage: prog [--data=<data>...]\n
Options:\n\t-d --data=<arg> Input data [default: x y]
"""
a = docopt(doc, '')
assert a == {'--data': ['x', 'y']}
doc = """Usage: prog [--data=<data>...]\n
Options:\n\t-d --data=<arg> Input data [default: x y]
"""
a = docopt(doc, '--data=this')
assert a == {'--data': ['this']}
#def test_parse_defaults():
# assert parse_defaults("""usage: prog
# options:
# -o, --option <o>
# --another <a> description
# [default: x]
# <a>
# <another> description [default: y]""") == \
# ([Option('-o', '--option', 1, None),
# Option(None, '--another', 1, 'x')],
# [Argument('<a>', None),
# Argument('<another>', 'y')])
#
# doc = '''
# -h, --help Print help message.
# -o FILE Output file.
# --verbose Verbose mode.'''
# assert parse_defaults(doc)[0] == [Option('-h', '--help'),
# Option('-o', None, 1),
# Option(None, '--verbose')]
def test_issue_59():
assert docopt('usage: prog --long=<a>', '--long=') == {'--long': ''}
assert docopt('usage: prog -l <a>\n'
'options: -l <a>', ['-l', '']) == {'-l': ''}
def test_options_first():
assert docopt('usage: prog [--opt] [<args>...]',
'--opt this that') == {'--opt': True,
'<args>': ['this', 'that']}
assert docopt('usage: prog [--opt] [<args>...]',
'this that --opt') == {'--opt': True,
'<args>': ['this', 'that']}
assert docopt('usage: prog [--opt] [<args>...]',
'this that --opt',
options_first=True) == {'--opt': False,
'<args>': ['this', 'that', '--opt']}
def test_issue_68_options_shortcut_does_not_include_options_in_usage_pattern():
args = docopt('usage: prog [-ab] [options]\n'
'options: -x\n -y', '-ax')
# Need to use `is` (not `==`) since we want to make sure
# that they are not 1/0, but strictly True/False:
assert args['-a'] is True
assert args['-b'] is False
assert args['-x'] is True
assert args['-y'] is False
def test_issue_65_evaluate_argv_when_called_not_when_imported():
import sys
sys.argv = 'prog -a'.split()
assert docopt('usage: prog [-ab]') == {'-a': True, '-b': False}
sys.argv = 'prog -b'.split()
assert docopt('usage: prog [-ab]') == {'-a': False, '-b': True}
def test_issue_71_double_dash_is_not_a_valid_option_argument():
with raises(DocoptExit):
docopt('usage: prog [--log=LEVEL] [--] <args>...', '--log -- 1 2')
with raises(DocoptExit):
docopt('''usage: prog [-l LEVEL] [--] <args>...
options: -l LEVEL''', '-l -- 1 2')
usage = '''usage: this
usage:hai
usage: this that
usage: foo
bar
PROGRAM USAGE:
foo
bar
usage:
\ttoo
\ttar
Usage: eggs spam
BAZZ
usage: pit stop'''
def test_parse_section():
assert parse_section('usage:', 'foo bar fizz buzz') == []
assert parse_section('usage:', 'usage: prog') == ['usage: prog']
assert parse_section('usage:',
'usage: -x\n -y') == ['usage: -x\n -y']
assert parse_section('usage:', usage) == [
'usage: this',
'usage:hai',
'usage: this that',
'usage: foo\n bar',
'PROGRAM USAGE:\n foo\n bar',
'usage:\n\ttoo\n\ttar',
'Usage: eggs spam',
'usage: pit stop',
]
def test_issue_126_defaults_not_parsed_correctly_when_tabs():
section = 'Options:\n\t--foo=<arg> [default: bar]'
assert parse_defaults(section) == [Option(None, '--foo', 1, 'bar')]
|
mit
|
jchrismer/PiQuad
|
Calibration/adjusted_least_squars.py
|
1
|
4915
|
'''
=====================================================================================
Python implementation of the ALS (Adjusted Least Squares) ellipsoid fitting algorithm
=====================================================================================
Sources:
"Consistent least squares fitting of ellipsoids" by Ivan Markovsky, Alexander Kukush, and Sabine Van Huffel
http://eprints.soton.ac.uk/263295/1/ellest_comp_published.pdf
Note this is only for 3 dimensional cases (some of the vectors describing indices are hard coded)
Author-
Joseph Chrismer
History:
V1.0 - 2/1/16
To do:
- Minimize over var (?)
- Check step (9) to be positive semi definite
'''
import numpy as np
import math
def tensor_function(k_index,input,var):
if k_index == 0:
return 1
elif k_index == 1:
return input
elif k_index == 2:
return (input**2 - var)
elif k_index == 3:
return (input**3 - 3*input*var)
#K = 4 (default case)
return (input**4 - 6*input**2*var + 3*var**2)
def als_fit(X,var):
n = np.size(X,0)
m = np.size(X,1)
# (1) Form the Tensor
ten = np.zeros((5,n,m))
for k in range(0,5):
for i in range(0,n):
for l in range(0,m):
ten[k,i,l] = tensor_function(k,X[i,l],var)
# (2) Create nβ x 2 matrix M
M1 = [1,1,2,1,2,3,1,2,3,0]
M2 = [1,2,2,3,3,3,0,0,0,0]
M = np.matrix([M1,M2]).transpose()
n_beta = int((n+1)*n/2 + n + 1)
# (3) Form the tensor R = nβ x nβ x n
    R = np.zeros((n_beta,n_beta,n), dtype=int)  # integer dtype: entries are used below to index into ten
for p in range(0,n_beta):
for q in range(p,n_beta):
for i in range(0,n):
#Python starts counting at zero so we need to offset by 1
offset = i+1
#Multiplying by 1 forces the type to be integer, otherwise '+' is interpreted as logical OR
R[p,q,i] = 1*(M[p,0] == offset) + 1*(M[p,1] == offset) + 1*(M[q,0] == offset) +1*(M[q,1] == offset)
# (4) Compute ηals
eta_als = np.zeros((n_beta,n_beta))
sum = 0
product = 1
for p in range(0,n_beta):
for q in range (p,n_beta):
for l in range(0,m):
# Compute the product
for i in range (0,n):
product = product * ( ten[R[p,q,i] , i, l] )
# Update the sum
sum = sum + product;
product = 1;
#Store value in eta_als
eta_als[p,q] = sum
sum = 0
#Define D (usually computed on the fly, but here it's hard coded for ellipsoids)
D = [1,3,4]
# (6) Form the symmetric matrix Ψals
psi_als = np.zeros((n_beta,n_beta))
for p in range(0,n_beta):
for q in range (0,n_beta):
if q >= p:
if((p in D) and (q in D)):
psi_als[p,q] = 4*eta_als[p,q]
elif((p not in D) and (q not in D)):
psi_als[p,q] = eta_als[p,q]
else:
psi_als[p,q] = 2*eta_als[p,q]
# q < p
else:
psi_als[p,q] = psi_als[q,p]
# (7) find vector and value of Ψals associated with the smallest eigenvalue (again hard coded for ellipsoid only)
evals,evecs = np.linalg.eig(psi_als)
    #Note numpy's implementation of eig returns an UNORDERED array for its eigenvalues
min_index = np.argmin(evals)
min_evec = evecs[:,min_index]
#number of parameters in A
na = (n+1)*n/2
    #Stack min_evec into the upper-triangular matrix a; note that it is filled COLUMNWISE w/res to the upper triangle
a = np.matrix([ [min_evec[0], min_evec[1], min_evec[3]],
[0 , min_evec[2], min_evec[4]],
[0 , 0 , min_evec[5]] ])
    #Construct the parameters corresponding to the standard components of an ellipsoid (quadratic, linear and scalar)
a = a + a.transpose() - np.diag(np.diag(a))
b = min_evec[6:9]
d = min_evec[9]
#(8) Find the center and parameter vector ah
    center = -0.5*np.dot(np.linalg.inv(a),b)
center_a_centerT = np.dot(np.dot(center,a),center.transpose())
    #Numpy doesn't collapse a 1x1 matrix into a scalar, so center_a_centerT[0,0] has to be used
ah = (1/(center_a_centerT[0,0]-d))*a
#(9) Check for PSD (ADD LATER)
#(10) Find transformation matrix which maps the given ellipsoid to a sphere
evals,Q = np.linalg.eig(ah)
    #find the radii (Numpy '=' binds a reference rather than copying, so radii needs to be instantiated first)
radii = np.zeros((3,1))
for i in range(0,3):
radii[i] = math.sqrt(1/evals[i])
min_axis = np.min(radii)
D_U = np.diag( [math.sqrt(evals[0]),math.sqrt(evals[1]),math.sqrt(evals[2])] )
D_U = D_U*min_axis
#Calculate the transformation matrix ALS_inv
Q_DU = np.dot(Q,D_U)
ALS_inv = np.dot(Q_DU,(np.linalg.inv(Q)))
return ALS_inv,center
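# --- Editor's illustrative sketch (not part of the original module) ---
# Shows the assumed calling convention: X is an n x m array holding m
# measurement vectors (n = 3 here) and var is the assumed noise variance.
# The synthetic points below lie exactly on an ellipsoid, so var = 0 is used.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    u = rng.randn(3, 200)
    u = u / np.linalg.norm(u, axis=0)                    # points on the unit sphere
    X_demo = np.dot(np.diag([2.0, 1.0, 0.5]), u)         # stretch into an ellipsoid
    X_demo = X_demo + np.array([[1.0], [-2.0], [0.5]])   # shift the center
    ALS_inv, center = als_fit(X_demo, 0.0)
    print("Estimated center:", center)                   # expected near [1, -2, 0.5]
    calibrated = np.dot(ALS_inv, X_demo - center.transpose())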
|
gpl-3.0
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-relay/azure/mgmt/relay/models/authorization_rule.py
|
2
|
1513
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AuthorizationRule(Resource):
"""Description of a namespace authorization rule.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param rights: The rights associated with the rule.
:type rights: list[str or ~azure.mgmt.relay.models.AccessRights]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'rights': {'unique': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'rights': {'key': 'properties.rights', 'type': '[AccessRights]'},
}
def __init__(self, rights=None):
super(AuthorizationRule, self).__init__()
self.rights = rights
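# Illustrative usage (a sketch, not part of the generated code): instances are normally
# returned by the management client, but the model can also be built directly, e.g.
#
#   rule = AuthorizationRule(rights=['Listen', 'Send'])
#
# where the string values are assumed to correspond to the AccessRights enum.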
|
mit
|
scorphus/scrapy
|
setup.py
|
15
|
1521
|
from os.path import dirname, join
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name='Scrapy',
version=version,
url='http://scrapy.org',
description='A high-level Web Crawling and Web Scraping framework',
long_description=open('README.rst').read(),
author='Scrapy developers',
maintainer='Pablo Hoffman',
maintainer_email='[email protected]',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['scrapy = scrapy.cmdline:execute']
},
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'Twisted>=10.0.0',
'w3lib>=1.8.0',
'queuelib',
'lxml',
'pyOpenSSL',
'cssselect>=0.9',
'six>=1.5.2',
],
)
|
bsd-3-clause
|
bmotlaghFLT/FLT_PhantomJS
|
src/qt/qtwebkit/Source/JavaScriptCore/KeywordLookupGenerator.py
|
117
|
11965
|
# Copyright (C) 2011 Apple Inc. All rights reserved.
# Copyright (C) 2012 Sony Network Entertainment. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import string
import operator
keywordsText = open(sys.argv[1]).read()
# A second argument signifies that the output
# should be redirected to a file
redirect_to_file = len(sys.argv) > 2
# Change stdout to point to the file if requested
if redirect_to_file:
file_output = open(sys.argv[-1], "w")
sys.stdout = file_output
# Observed weights of the most common keywords, rounded to 2.s.d
keyWordWeights = {
"catch": 0.01,
"try": 0.01,
"while": 0.01,
"case": 0.01,
"break": 0.01,
"new": 0.01,
"in": 0.01,
"typeof": 0.02,
"true": 0.02,
"false": 0.02,
"for": 0.03,
"null": 0.03,
"else": 0.03,
"return": 0.13,
"var": 0.13,
"if": 0.16,
"function": 0.18,
"this": 0.18,
}
def allWhitespace(str):
for c in str:
if not(c in string.whitespace):
return False
return True
def parseKeywords(keywordsText):
if sys.platform == "cygwin":
keywordsText = keywordsText.replace("\r\n", "\n")
lines = keywordsText.split("\n")
lines = [line.split("#")[0] for line in lines]
lines = [line for line in lines if (not allWhitespace(line))]
name = lines[0].split()
terminator = lines[-1]
if not name[0] == "@begin":
raise Exception("expected description beginning with @begin")
if not terminator == "@end":
raise Exception("expected description ending with @end")
lines = lines[1:-1] # trim off the old heading
return [line.split() for line in lines]
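# For reference, the keyword description consumed above is expected to look roughly
# like the following sketch (illustrative only, not the actual Keywords.table):
#
#   @begin javascriptKeywords
#   # comment lines and blank lines are stripped
#   if          IF
#   else        ELSE
#   function    FUNCTION
#   @end
#
# i.e. an "@begin <name>" header, one "<keyword> <token>" pair per line, and a
# closing "@end".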
def makePadding(size):
str = ""
for i in range(size):
str = str + " "
return str
class Trie:
def __init__(self, prefix):
self.prefix = prefix
self.keys = {}
self.value = None
def insert(self, key, value):
if len(key) == 0:
self.value = value
return
if not (key[0] in self.keys):
self.keys[key[0]] = Trie(key[0])
self.keys[key[0]].insert(key[1:], value)
def coalesce(self):
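        # Merge chains of value-less single-child nodes into one node with a longer
        # prefix (e.g. 'f' -> 'o' -> 'r' becomes a single 'for' node), so the emitted
        # C code can compare several characters at once via the COMPARE_* macros.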
keys = {}
for k, v in self.keys.items():
t = v.coalesce()
keys[t.prefix] = t
self.keys = keys
if self.value != None:
return self
if len(self.keys) != 1:
return self
# Python 3: for() loop for compatibility. Use next() when Python 2.6 is the baseline.
for (prefix, suffix) in self.keys.items():
res = Trie(self.prefix + prefix)
res.value = suffix.value
res.keys = suffix.keys
return res
def fillOut(self, prefix=""):
self.fullPrefix = prefix + self.prefix
weight = 0
if self.fullPrefix in keyWordWeights:
weight = weight + keyWordWeights[self.fullPrefix]
self.selfWeight = weight
for trie in self.keys.values():
trie.fillOut(self.fullPrefix)
weight = weight + trie.weight
self.keys = [(trie.prefix, trie) for trie in sorted(self.keys.values(), key=operator.attrgetter('weight'), reverse=True)]
self.weight = weight
def printSubTreeAsC(self, typeName, indent):
str = makePadding(indent)
if self.value != None:
print(str + "if (!isIdentPart(code[%d])) {" % (len(self.fullPrefix)))
print(str + " internalShift<%d>();" % len(self.fullPrefix))
print(str + " if (shouldCreateIdentifier)")
print(str + (" data->ident = &m_vm->propertyNames->%sKeyword;" % self.fullPrefix))
print(str + " return " + self.value + ";")
print(str + "}")
rootIndex = len(self.fullPrefix)
itemCount = 0
for k, trie in self.keys:
baseIndex = rootIndex
if (baseIndex > 0) and (len(k) == 3):
baseIndex = baseIndex - 1
k = trie.fullPrefix[baseIndex] + k
test = [("'%s'" % c) for c in k]
if len(test) == 1:
comparison = "code[%d] == %s" % (baseIndex, test[0])
else:
base = "code"
if baseIndex > 0:
base = "code + %d" % baseIndex
comparison = ("COMPARE_%d%sS(%s, " % (len(test), typeName, base)) + ", ".join(test) + ")"
if itemCount == 0:
print(str + "if (" + comparison + ") {")
else:
print(str + "} else if (" + comparison + ") {")
trie.printSubTreeAsC(typeName, indent + 4)
itemCount = itemCount + 1
if itemCount == len(self.keys):
print(str + "}")
def maxLength(self):
max = len(self.fullPrefix)
for (_, trie) in self.keys:
l = trie.maxLength()
if l > max:
max = l
return max
def printAsC(self):
print("namespace JSC {")
print("")
print("static ALWAYS_INLINE bool isIdentPart(LChar c);")
print("static ALWAYS_INLINE bool isIdentPart(UChar c);")
# max length + 1 so we don't need to do any bounds checking at all
print("static const int maxTokenLength = %d;" % (self.maxLength() + 1))
print("")
print("template <>")
print("template <bool shouldCreateIdentifier> ALWAYS_INLINE JSTokenType Lexer<UChar>::parseKeyword(JSTokenData* data)")
print("{")
print(" ASSERT(m_codeEnd - m_code >= maxTokenLength);")
print("")
print(" const UChar* code = m_code;")
self.printSubTreeAsC("UCHAR", 4)
print(" return IDENT;")
print("}")
print("")
print("template <>")
print("template <bool shouldCreateIdentifier> ALWAYS_INLINE JSTokenType Lexer<LChar>::parseKeyword(JSTokenData* data)")
print("{")
print(" ASSERT(m_codeEnd - m_code >= maxTokenLength);")
print("")
print(" const LChar* code = m_code;")
self.printSubTreeAsC("CHAR", 4)
print(" return IDENT;")
print("}")
print("")
print("} // namespace JSC")
keywords = parseKeywords(keywordsText)
trie = Trie("")
for k, v in keywords:
trie.insert(k, v)
trie.coalesce()
trie.fillOut()
print("// This file was generated by KeywordLookupGenerator.py. Do not edit.")
print("""
#if CPU(NEEDS_ALIGNED_ACCESS)
#define COMPARE_2CHARS(address, char1, char2) \\
(((address)[0] == char1) && ((address)[1] == char2))
#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\
(COMPARE_2CHARS(address, char1, char2) && COMPARE_2CHARS((address) + 2, char3, char4))
#define COMPARE_2UCHARS(address, char1, char2) \\
(((address)[0] == char1) && ((address)[1] == char2))
#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\
(COMPARE_2UCHARS(address, char1, char2) && COMPARE_2UCHARS((address) + 2, char3, char4))
#else // CPU(NEEDS_ALIGNED_ACCESS)
#if CPU(BIG_ENDIAN)
#define CHARPAIR_TOUINT16(a, b) \\
((((uint16_t)(a)) << 8) + (uint16_t)(b))
#define CHARQUAD_TOUINT32(a, b, c, d) \\
((((uint32_t)(CHARPAIR_TOUINT16(a, b))) << 16) + CHARPAIR_TOUINT16(c, d))
#define UCHARPAIR_TOUINT32(a, b) \\
((((uint32_t)(a)) << 16) + (uint32_t)(b))
#define UCHARQUAD_TOUINT64(a, b, c, d) \\
    ((((uint64_t)(UCHARPAIR_TOUINT32(a, b))) << 32) + UCHARPAIR_TOUINT32(c, d))
#else // CPU(BIG_ENDIAN)
#define CHARPAIR_TOUINT16(a, b) \\
((((uint16_t)(b)) << 8) + (uint16_t)(a))
#define CHARQUAD_TOUINT32(a, b, c, d) \\
((((uint32_t)(CHARPAIR_TOUINT16(c, d))) << 16) + CHARPAIR_TOUINT16(a, b))
#define UCHARPAIR_TOUINT32(a, b) \\
((((uint32_t)(b)) << 16) + (uint32_t)(a))
#define UCHARQUAD_TOUINT64(a, b, c, d) \\
((((uint64_t)(UCHARPAIR_TOUINT32(c, d))) << 32) + UCHARPAIR_TOUINT32(a, b))
#endif // CPU(BIG_ENDIAN)
#define COMPARE_2CHARS(address, char1, char2) \\
(((uint16_t*)(address))[0] == CHARPAIR_TOUINT16(char1, char2))
#define COMPARE_2UCHARS(address, char1, char2) \\
(((uint32_t*)(address))[0] == UCHARPAIR_TOUINT32(char1, char2))
#if CPU(X86_64)
#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\
(((uint32_t*)(address))[0] == CHARQUAD_TOUINT32(char1, char2, char3, char4))
#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\
(((uint64_t*)(address))[0] == UCHARQUAD_TOUINT64(char1, char2, char3, char4))
#else // CPU(X86_64)
#define COMPARE_4CHARS(address, char1, char2, char3, char4) \\
(COMPARE_2CHARS(address, char1, char2) && COMPARE_2CHARS((address) + 2, char3, char4))
#define COMPARE_4UCHARS(address, char1, char2, char3, char4) \\
(COMPARE_2UCHARS(address, char1, char2) && COMPARE_2UCHARS((address) + 2, char3, char4))
#endif // CPU(X86_64)
#endif // CPU(NEEDS_ALIGNED_ACCESS)
#define COMPARE_3CHARS(address, char1, char2, char3) \\
(COMPARE_2CHARS(address, char1, char2) && ((address)[2] == (char3)))
#define COMPARE_3UCHARS(address, char1, char2, char3) \\
(COMPARE_2UCHARS(address, char1, char2) && ((address)[2] == (char3)))
#define COMPARE_5CHARS(address, char1, char2, char3, char4, char5) \\
(COMPARE_4CHARS(address, char1, char2, char3, char4) && ((address)[4] == (char5)))
#define COMPARE_5UCHARS(address, char1, char2, char3, char4, char5) \\
(COMPARE_4UCHARS(address, char1, char2, char3, char4) && ((address)[4] == (char5)))
#define COMPARE_6CHARS(address, char1, char2, char3, char4, char5, char6) \\
(COMPARE_4CHARS(address, char1, char2, char3, char4) && COMPARE_2CHARS(address + 4, char5, char6))
#define COMPARE_6UCHARS(address, char1, char2, char3, char4, char5, char6) \\
(COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_2UCHARS(address + 4, char5, char6))
#define COMPARE_7CHARS(address, char1, char2, char3, char4, char5, char6, char7) \\
(COMPARE_4CHARS(address, char1, char2, char3, char4) && COMPARE_4CHARS(address + 3, char4, char5, char6, char7))
#define COMPARE_7UCHARS(address, char1, char2, char3, char4, char5, char6, char7) \\
(COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_4UCHARS(address + 3, char4, char5, char6, char7))
#define COMPARE_8CHARS(address, char1, char2, char3, char4, char5, char6, char7, char8) \\
(COMPARE_4CHARS(address, char1, char2, char3, char4) && COMPARE_4CHARS(address + 4, char5, char6, char7, char8))
#define COMPARE_8UCHARS(address, char1, char2, char3, char4, char5, char6, char7, char8) \\
(COMPARE_4UCHARS(address, char1, char2, char3, char4) && COMPARE_4UCHARS(address + 4, char5, char6, char7, char8))
""")
trie.printAsC()
# Close the redirected file if requested
if (redirect_to_file):
file_output.close()
sys.stdout = sys.__stdout__
|
bsd-3-clause
|
google-code/android-scripting
|
python/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/rotmodel.py
|
131
|
2149
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from google.appengine.ext import db
class ROTModel(db.Model):
"""
ROTModel overrides the db.Model put function, having it retry
up to 3 times when it encounters a datastore timeout. This is
    to try and maximize the chance the data makes it into the datastore
when attempted. If it fails, it raises the db.Timeout error and the
calling application will need to handle that.
"""
def put(self):
count = 0
while count < 3:
try:
return db.Model.put(self)
except db.Timeout:
count += 1
        else:
            # while/else: reached only when all three attempts raised db.Timeout
            raise db.Timeout()
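# Illustrative usage (hypothetical model, not part of this module) -- ROTModel is a
# drop-in replacement for db.Model:
#
#   class Counter(ROTModel):
#       count = db.IntegerProperty(default=0)
#
#   Counter(count=1).put()  # retried up to 3 times on db.Timeout before re-raising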
|
apache-2.0
|
DazWorrall/ansible
|
lib/ansible/modules/network/avi/avi_api_session.py
|
27
|
8381
|
#!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi ([email protected]) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_api_session
author: Gaurav Rastogi ([email protected])
short_description: Avi API Module
description:
- This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
    - This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
            - Allowed HTTP methods for RESTful services that are supported by Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
            - 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
api_version: 16.4
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
api_version: 16.4
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
api_version: 16.4
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, ansible_return, HAS_AVI)
from avi.sdk.avi_api import ApiSession
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
http_method=dict(required=True,
choices=['get', 'put', 'post', 'patch',
'delete']),
path=dict(type='str', required=True),
params=dict(type='dict'),
data=dict(type='jsonarg'),
timeout=dict(type='int', default=60)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
tenant_uuid = module.params.get('tenant_uuid', None)
api = ApiSession.get_session(
module.params['controller'], module.params['username'],
module.params['password'], tenant=module.params['tenant'],
tenant_uuid=tenant_uuid)
tenant = module.params.get('tenant', '')
timeout = int(module.params.get('timeout'))
# path is a required argument
path = module.params.get('path', '')
params = module.params.get('params', None)
data = module.params.get('data', None)
# Get the api_version from module.
api_version = module.params.get('api_version', '16.4')
if data is not None:
data = json.loads(data)
method = module.params['http_method']
existing_obj = None
changed = method != 'get'
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
if method == 'post':
# need to check if object already exists. In that case
# change the method to be put
gparams['name'] = data['name']
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
try:
existing_obj = rsp.json()['results'][0]
except IndexError:
# object is not found
pass
else:
# object is present
method = 'put'
path += '/' + existing_obj['uuid']
if method == 'put':
        # put can happen when the full path is specified or it is put + post
if existing_obj is None:
using_collection = False
if (len(path.split('/')) == 1) and ('name' in data):
gparams['name'] = data['name']
using_collection = True
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
rsp_data = rsp.json()
if using_collection:
if rsp_data['results']:
existing_obj = rsp_data['results'][0]
path += '/' + existing_obj['uuid']
else:
method = 'post'
else:
if rsp.status_code == 404:
method = 'post'
else:
existing_obj = rsp_data
if existing_obj:
changed = not avi_obj_cmp(data, existing_obj)
cleanup_absent_fields(data)
if method == 'patch':
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
existing_obj = rsp.json()
if (method == 'put' and changed) or (method != 'put'):
fn = getattr(api, method)
rsp = fn(path, tenant=tenant, tenant_uuid=tenant, timeout=timeout,
params=params, data=data, api_version=api_version)
else:
rsp = None
if method == 'delete' and rsp.status_code == 404:
changed = False
rsp.status_code = 200
if method == 'patch' and existing_obj and rsp.status_code < 299:
# Ideally the comparison should happen with the return values
# from the patch API call. However, currently Avi API are
# returning different hostname when GET is used vs Patch.
# tracked as AV-12561
if path.startswith('pool'):
time.sleep(1)
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
new_obj = rsp.json()
changed = not avi_obj_cmp(new_obj, existing_obj)
if rsp is None:
return module.exit_json(changed=changed, obj=existing_obj)
return ansible_return(module, rsp, changed, req=data)
if __name__ == '__main__':
main()
|
gpl-3.0
|
architecture-building-systems/CityEnergyAnalyst
|
cea/technologies/thermal_network/simplified_thermal_network.py
|
2
|
29336
|
import math
import time
import geopandas as gpd
import numpy as np
import pandas as pd
import wntr
import cea.config
import cea.inputlocator
import cea.technologies.substation as substation
from cea.constants import P_WATER_KGPERM3, FT_WATER_TO_PA, FT_TO_M, M_WATER_TO_PA, HEAT_CAPACITY_OF_WATER_JPERKGK, SHAPEFILE_TOLERANCE
from cea.optimization.constants import PUMP_ETA
from cea.optimization.preprocessing.preprocessing_main import get_building_names_with_load
from cea.technologies.thermal_network.thermal_network_loss import calc_temperature_out_per_pipe
from cea.resources import geothermal
from cea.technologies.constants import NETWORK_DEPTH
from cea.utilities.epwreader import epw_reader
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def calculate_ground_temperature(locator):
"""
calculate ground temperatures.
:param locator:
:return: list of ground temperatures, one for each hour of the year
:rtype: list[np.float64]
"""
weather_file = locator.get_weather_file()
T_ambient_C = epw_reader(weather_file)['drybulb_C']
network_depth_m = NETWORK_DEPTH # [m]
T_ground_K = geothermal.calc_ground_temperature(locator, T_ambient_C.values, network_depth_m)
return T_ground_K
def extract_network_from_shapefile(edge_shapefile_df, node_shapefile_df):
"""
Extracts network data into DataFrames for pipes and nodes in the network
:param edge_shapefile_df: DataFrame containing all data imported from the edge shapefile
:param node_shapefile_df: DataFrame containing all data imported from the node shapefile
:type edge_shapefile_df: DataFrame
:type node_shapefile_df: DataFrame
:return node_df: DataFrame containing all nodes and their corresponding coordinates
:return edge_df: list of edges and their corresponding lengths and start and end nodes
:rtype node_df: DataFrame
:rtype edge_df: DataFrame
"""
# create node dictionary with plant and consumer nodes
node_dict = {}
node_shapefile_df.set_index("Name", inplace=True)
node_shapefile_df = node_shapefile_df.astype('object')
node_shapefile_df['coordinates'] = node_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort node_df by index number
node_sorted_index = node_shapefile_df.index.to_series().str.split('NODE', expand=True)[1].apply(int).sort_values(
ascending=True)
node_shapefile_df = node_shapefile_df.reindex(index=node_sorted_index.index)
for node, row in node_shapefile_df.iterrows():
coord_node = row['geometry'].coords[0]
coord_node_round = (round(coord_node[0], SHAPEFILE_TOLERANCE), round(coord_node[1], SHAPEFILE_TOLERANCE))
node_dict[coord_node_round] = node
# create edge dictionary with pipe lengths and start and end nodes
# complete node dictionary with missing nodes (i.e., joints)
edge_shapefile_df.set_index("Name", inplace=True)
edge_shapefile_df = edge_shapefile_df.astype('object')
edge_shapefile_df['coordinates'] = edge_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort edge_df by index number
edge_sorted_index = edge_shapefile_df.index.to_series().str.split('PIPE', expand=True)[1].apply(int).sort_values(
ascending=True)
edge_shapefile_df = edge_shapefile_df.reindex(index=edge_sorted_index.index)
# assign edge properties
edge_shapefile_df['start node'] = ''
edge_shapefile_df['end node'] = ''
for pipe, row in edge_shapefile_df.iterrows():
# get the length of the pipe and add to dataframe
edge_coords = row['geometry'].coords
edge_shapefile_df.loc[pipe, 'length_m'] = row['geometry'].length
start_node = (round(edge_coords[0][0], SHAPEFILE_TOLERANCE), round(edge_coords[0][1], SHAPEFILE_TOLERANCE))
end_node = (round(edge_coords[1][0], SHAPEFILE_TOLERANCE), round(edge_coords[1][1], SHAPEFILE_TOLERANCE))
if start_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'start node'] = node_dict[start_node]
else:
print(f"The start node of {pipe} has no match in node_dict, check precision of the coordinates.")
if end_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'end node'] = node_dict[end_node]
else:
print(f"The end node of {pipe} has no match in node_dict, check precision of the coordinates.")
return node_shapefile_df, edge_shapefile_df
def get_thermal_network_from_shapefile(locator, network_type, network_name):
"""
This function reads the existing node and pipe network from a shapefile and produces an edge-node incidence matrix
(as defined by Oppelt et al., 2016) as well as the edge properties (length, start node, and end node) and node
coordinates.
"""
# import shapefiles containing the network's edges and nodes
network_edges_df = gpd.read_file(locator.get_network_layout_edges_shapefile(network_type, network_name))
network_nodes_df = gpd.read_file(locator.get_network_layout_nodes_shapefile(network_type, network_name))
# check duplicated NODE/PIPE IDs
duplicated_nodes = network_nodes_df[network_nodes_df.Name.duplicated(keep=False)]
duplicated_edges = network_edges_df[network_edges_df.Name.duplicated(keep=False)]
if duplicated_nodes.size > 0:
raise ValueError('There are duplicated NODE IDs:', duplicated_nodes)
if duplicated_edges.size > 0:
        raise ValueError('There are duplicated PIPE IDs:', duplicated_edges)
# get node and pipe information
node_df, edge_df = extract_network_from_shapefile(network_edges_df, network_nodes_df)
return edge_df, node_df
def calc_max_diameter(volume_flow_m3s, pipe_catalog, velocity_ms, peak_load_percentage):
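    # Size the pipe for the design fraction of the peak flow at the target velocity:
    # from Q = v * A = v * pi * D^2 / 4  =>  D = sqrt(4 * Q / (pi * v)), then snap to
    # the closest internal diameter available in the distribution-systems catalogue.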
volume_flow_m3s_corrected_to_design = volume_flow_m3s * peak_load_percentage / 100
diameter_m = math.sqrt((volume_flow_m3s_corrected_to_design / velocity_ms) * (4 / math.pi))
selection_of_catalog = pipe_catalog.loc[(pipe_catalog['D_int_m'] - diameter_m).abs().argsort()[:1]]
D_int_m = selection_of_catalog['D_int_m'].values[0]
Pipe_DN = selection_of_catalog['Pipe_DN'].values[0]
D_ext_m = selection_of_catalog['D_ext_m'].values[0]
D_ins_m = selection_of_catalog['D_ins_m'].values[0]
return Pipe_DN, D_ext_m, D_int_m, D_ins_m
def calc_head_loss_m(diameter_m, max_volume_flow_rates_m3s, coefficient_friction, length_m):
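    # Hazen-Williams head loss per metre of pipe (SI form):
    #   h_f / L = 10.67 * Q^1.852 / (C^1.852 * D^4.8704)   [m water per m pipe]
    # with Q in m3/s, D in m and C the dimensionless friction coefficient (the code
    # below keeps the original 1.85 exponent on C, a close approximation of 1.852).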
hf_L = (10.67 / (coefficient_friction ** 1.85)) * (max_volume_flow_rates_m3s ** 1.852) / (diameter_m ** 4.8704)
head_loss_m = hf_L * length_m
return head_loss_m
def calc_linear_thermal_loss_coefficient(diameter_ext_m, diameter_int_m, diameter_insulation_m):
    # Linear (per-metre) thermal transmittance of the pipe, from the series of
    # cylindrical conduction resistances of the steel wall and the insulation layer:
    #   R' = ln(r_out/r_in)/k_pipe + ln(r_ins/r_out)/k_ins   [m*K/W],   K = 2*pi/R'
    r_out_m = diameter_ext_m / 2
    r_in_m = diameter_int_m / 2
    r_s_m = diameter_insulation_m / 2
    k_pipe_WpermK = 58.7  # steel pipe
    k_ins_WpermK = 0.059  # calcium silicate insulation
    resistance_mKperW = ((math.log(r_out_m / r_in_m) / k_pipe_WpermK) + (math.log(r_s_m / r_out_m) / k_ins_WpermK))
    K_WperKm = 2 * math.pi / resistance_mKperW
    return K_WperKm
def calc_thermal_loss_per_pipe(T_in_K, m_kgpers, T_ground_K, k_kWperK):
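    # Heat lost over the pipe from the sensible-heat balance of the water:
    #   Q_loss = m_dot * c_p * (T_in - T_out) / 1000   [kW]
    # With the hourly timestep used in this model, kW and kWh per timestep coincide
    # numerically, hence the _kWh naming.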
T_out_K = calc_temperature_out_per_pipe(T_in_K, m_kgpers, k_kWperK, T_ground_K)
DT = T_in_K - T_out_K
Q_loss_kWh = DT * m_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
return Q_loss_kWh
def thermal_network_simplified(locator, config, network_name):
# local variables
network_type = config.thermal_network.network_type
min_head_substation_kPa = config.thermal_network.min_head_substation
thermal_transfer_unit_design_head_m = min_head_substation_kPa * 1000 / M_WATER_TO_PA
coefficient_friction_hazen_williams = config.thermal_network.hw_friction_coefficient
velocity_ms = config.thermal_network.peak_load_velocity
fraction_equivalent_length = config.thermal_network.equivalent_length_factor
peak_load_percentage = config.thermal_network.peak_load_percentage
# GET INFORMATION ABOUT THE NETWORK
edge_df, node_df = get_thermal_network_from_shapefile(locator, network_type, network_name)
# GET INFORMATION ABOUT THE DEMAND OF BUILDINGS AND CONNECT TO THE NODE INFO
# calculate substations for all buildings
# local variables
total_demand = pd.read_csv(locator.get_total_demand())
volume_flow_m3pers_building = pd.DataFrame()
T_sup_K_building = pd.DataFrame()
T_re_K_building = pd.DataFrame()
Q_demand_kWh_building = pd.DataFrame()
if network_type == "DH":
buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
DHN_barcode = "0"
if (buildings_name_with_heating != [] and buildings_name_with_space_heating != []):
building_names = [building for building in buildings_name_with_heating if building in
node_df.Building.values]
substation.substation_main_heating(locator, total_demand, building_names, DHN_barcode=DHN_barcode)
else:
raise Exception('problem here')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DH", DHN_barcode))
volume_flow_m3pers_building[building_name] = substation_results["mdot_DH_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results["T_supply_DH_result_K"]
T_re_K_building[building_name] = np.where(substation_results["T_return_DH_result_K"] >273.15,
substation_results["T_return_DH_result_K"], np.nan)
Q_demand_kWh_building[building_name] = (substation_results["Q_heating_W"] + substation_results[
"Q_dhw_W"]) / 1000
if network_type == "DC":
buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
DCN_barcode = "0"
if buildings_name_with_cooling != []:
building_names = [building for building in buildings_name_with_cooling if building in
node_df.Building.values]
substation.substation_main_cooling(locator, total_demand, building_names, DCN_barcode=DCN_barcode)
else:
raise Exception('problem here')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DC", DCN_barcode))
volume_flow_m3pers_building[building_name] = substation_results[
"mdot_space_cooling_data_center_and_refrigeration_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results[
"T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"]
T_re_K_building[building_name] = substation_results[
"T_return_DC_space_cooling_data_center_and_refrigeration_result_K"]
Q_demand_kWh_building[building_name] = substation_results[
"Q_space_cooling_data_center_and_refrigeration_W"] / 1000
import cea.utilities
with cea.utilities.pushd(locator.get_thermal_network_folder()):
# Create a water network model
wn = wntr.network.WaterNetworkModel()
# add loads
building_base_demand_m3s = {}
for building in volume_flow_m3pers_building.keys():
building_base_demand_m3s[building] = volume_flow_m3pers_building[building].max()
pattern_demand = (volume_flow_m3pers_building[building].values / building_base_demand_m3s[building]).tolist()
wn.add_pattern(building, pattern_demand)
# add nodes
consumer_nodes = []
building_nodes_pairs = {}
building_nodes_pairs_inversed = {}
for node in node_df.iterrows():
if node[1]["Type"] == "CONSUMER":
demand_pattern = node[1]['Building']
base_demand_m3s = building_base_demand_m3s[demand_pattern]
consumer_nodes.append(node[0])
building_nodes_pairs[node[0]] = demand_pattern
building_nodes_pairs_inversed[demand_pattern] = node[0]
wn.add_junction(node[0],
base_demand=base_demand_m3s,
demand_pattern=demand_pattern,
elevation=thermal_transfer_unit_design_head_m,
coordinates=node[1]["coordinates"])
elif node[1]["Type"] == "PLANT":
base_head = int(thermal_transfer_unit_design_head_m*1.2)
start_node = node[0]
name_node_plant = start_node
wn.add_reservoir(start_node,
base_head=base_head,
coordinates=node[1]["coordinates"])
else:
wn.add_junction(node[0],
elevation=0,
coordinates=node[1]["coordinates"])
# add pipes
for edge in edge_df.iterrows():
length_m = edge[1]["length_m"]
edge_name = edge[0]
wn.add_pipe(edge_name, edge[1]["start node"],
edge[1]["end node"],
length=length_m * (1 + fraction_equivalent_length),
roughness=coefficient_friction_hazen_williams,
minor_loss=0.0,
status='OPEN')
# add options
        wn.options.time.duration = 8759 * 3600  # tells EPANET to run a full-year simulation
wn.options.time.hydraulic_timestep = 60 * 60
wn.options.time.pattern_timestep = 60 * 60
wn.options.solver.accuracy = 0.01
wn.options.solver.trials = 100
# 1st ITERATION GET MASS FLOWS AND CALCULATE DIAMETER
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
max_volume_flow_rates_m3s = results.link['flowrate'].abs().max()
pipe_names = max_volume_flow_rates_m3s.index.values
pipe_catalog = pd.read_excel(locator.get_database_distribution_systems(), sheet_name='THERMAL_GRID')
Pipe_DN, D_ext_m, D_int_m, D_ins_m = zip(
*[calc_max_diameter(flow, pipe_catalog, velocity_ms=velocity_ms, peak_load_percentage=peak_load_percentage) for
flow in max_volume_flow_rates_m3s])
pipe_dn = pd.Series(Pipe_DN, pipe_names)
diameter_int_m = pd.Series(D_int_m, pipe_names)
diameter_ext_m = pd.Series(D_ext_m, pipe_names)
diameter_ins_m = pd.Series(D_ins_m, pipe_names)
# 2nd ITERATION GET PRESSURE POINTS AND MASSFLOWS FOR SIZING PUMPING NEEDS - this could be for all the year
# modify diameter and run simulations
edge_df['Pipe_DN'] = pipe_dn
edge_df['D_int_m'] = D_int_m
for edge in edge_df.iterrows():
edge_name = edge[0]
pipe = wn.get_link(edge_name)
pipe.diameter = diameter_int_m[edge_name]
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# 3rd ITERATION GET FINAL UTILIZATION OF THE GRID (SUPPLY SIDE)
# get accumulated head loss per hour
unitary_head_ftperkft = results.link['headloss'].abs()
unitary_head_mperm = unitary_head_ftperkft * FT_TO_M / (FT_TO_M * 1000)
head_loss_m = unitary_head_mperm.copy()
for column in head_loss_m.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_m[column] = head_loss_m[column] * length_m
reservoir_head_loss_m = head_loss_m.sum(axis=1) + thermal_transfer_unit_design_head_m*1.2 # fixme: only one thermal_transfer_unit_design_head_m from one substation?
# apply this pattern to the reservoir and get results
base_head = reservoir_head_loss_m.max()
pattern_head_m = (reservoir_head_loss_m.values / base_head).tolist()
wn.add_pattern('reservoir', pattern_head_m)
reservoir = wn.get_node(name_node_plant)
reservoir.head_timeseries.base_value = int(base_head)
reservoir.head_timeseries._pattern = 'reservoir'
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# POSTPROCESSING
# $ POSTPROCESSING - PRESSURE/HEAD LOSSES PER PIPE PER HOUR OF THE YEAR
# at the pipes
unitary_head_loss_supply_network_ftperkft = results.link['headloss'].abs()
linear_pressure_loss_Paperm = unitary_head_loss_supply_network_ftperkft * FT_WATER_TO_PA / (FT_TO_M * 1000)
head_loss_supply_network_Pa = linear_pressure_loss_Paperm.copy()
for column in head_loss_supply_network_Pa.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_supply_network_Pa[column] = head_loss_supply_network_Pa[column] * length_m
head_loss_return_network_Pa = head_loss_supply_network_Pa.copy(0)
# at the substations
head_loss_substations_ft = results.node['head'][consumer_nodes].abs()
head_loss_substations_Pa = head_loss_substations_ft * FT_WATER_TO_PA
        # POSTPROCESSING MASS FLOW RATES
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
# $ POSTPROCESSING - PRESSURE LOSSES ACCUMULATED PER HOUR OF THE YEAR (TIMES 2 to account for return)
accumulated_head_loss_supply_Pa = head_loss_supply_network_Pa.sum(axis=1)
accumulated_head_loss_return_Pa = head_loss_return_network_Pa.sum(axis=1)
accumulated_head_loss_substations_Pa = head_loss_substations_Pa.sum(axis=1)
accumulated_head_loss_total_Pa = accumulated_head_loss_supply_Pa + accumulated_head_loss_return_Pa + accumulated_head_loss_substations_Pa
# $ POSTPROCESSING - THERMAL LOSSES PER PIPE PER HOUR OF THE YEAR (SUPPLY)
# calculate the thermal characteristics of the grid
temperature_of_the_ground_K = calculate_ground_temperature(locator)
thermal_coeffcient_WperKm = pd.Series(
np.vectorize(calc_linear_thermal_loss_coefficient)(diameter_ext_m, diameter_int_m, diameter_ins_m), pipe_names)
average_temperature_supply_K = T_sup_K_building.mean(axis=1)
thermal_losses_supply_kWh = results.link['headloss'].copy()
thermal_losses_supply_kWh.reset_index(inplace=True, drop=True)
thermal_losses_supply_Wperm = thermal_losses_supply_kWh.copy()
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
k_WperKm_pipe = thermal_coeffcient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_supply_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_supply_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
thermal_losses_supply_Wperm[pipe] = (thermal_losses_supply_kWh[pipe] / length_m) * 1000
# return pipes
average_temperature_return_K = T_re_K_building.mean(axis=1)
thermal_losses_return_kWh = results.link['headloss'].copy()
thermal_losses_return_kWh.reset_index(inplace=True, drop=True)
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
k_WperKm_pipe = thermal_coeffcient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_return_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_return_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
# WRITE TO DISK
# LINEAR PRESSURE LOSSES (EDGES)
linear_pressure_loss_Paperm.to_csv(locator.get_network_linear_pressure_drop_edges(network_type, network_name),
index=False)
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
massflow_supply_kgs.to_csv(locator.get_thermal_network_layout_massflow_edges_file(network_type, network_name),
index=False)
# VELOCITY (EDGES)
velocity_edges_ms = results.link['velocity'].abs()
velocity_edges_ms.to_csv(locator.get_thermal_network_velocity_edges_file(network_type, network_name),
index=False)
# PRESSURE LOSSES (NODES)
pressure_at_nodes_ft = results.node['pressure'].abs()
pressure_at_nodes_Pa = pressure_at_nodes_ft * FT_TO_M * M_WATER_TO_PA
pressure_at_nodes_Pa.to_csv(locator.get_network_pressure_at_nodes(network_type, network_name), index=False)
# MASS_FLOW_RATE (NODES)
# $ POSTPROCESSING - MASSFLOWRATES PER NODE PER HOUR OF THE YEAR
flow_rate_supply_nodes_m3s = results.node['demand'].abs()
massflow_supply_nodes_kgs = flow_rate_supply_nodes_m3s * P_WATER_KGPERM3
massflow_supply_nodes_kgs.to_csv(locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name),
index=False)
# thermal demand per building (no losses in the network or substations)
Q_demand_Wh_building = Q_demand_kWh_building * 1000
Q_demand_Wh_building.to_csv(locator.get_thermal_demand_csv_file(network_type, network_name), index=False)
# pressure losses total
# $ POSTPROCESSING - PUMPING NEEDS PER HOUR OF THE YEAR (TIMES 2 to account for return)
flow_rate_substations_m3s = results.node['demand'][consumer_nodes].abs()
head_loss_supply_kWperm = (linear_pressure_loss_Paperm * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kWperm = head_loss_supply_kWperm.copy()
pressure_loss_supply_edge_kW = (head_loss_supply_network_Pa * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kW = pressure_loss_supply_edge_kW.copy()
head_loss_substations_kW = (head_loss_substations_Pa * (flow_rate_substations_m3s * 3600)) / (3.6E6 * PUMP_ETA)
accumulated_head_loss_supply_kW = pressure_loss_supply_edge_kW.sum(axis=1)
accumulated_head_loss_return_kW = head_loss_return_kW.sum(axis=1)
accumulated_head_loss_substations_kW = head_loss_substations_kW.sum(axis=1)
accumulated_head_loss_total_kW = accumulated_head_loss_supply_kW + \
accumulated_head_loss_return_kW + \
accumulated_head_loss_substations_kW
head_loss_system_Pa = pd.DataFrame({"pressure_loss_supply_Pa": accumulated_head_loss_supply_Pa,
"pressure_loss_return_Pa": accumulated_head_loss_return_Pa,
"pressure_loss_substations_Pa": accumulated_head_loss_substations_Pa,
"pressure_loss_total_Pa": accumulated_head_loss_total_Pa})
head_loss_system_Pa.to_csv(locator.get_network_total_pressure_drop_file(network_type, network_name),
index=False)
# $ POSTPROCESSING - PLANT HEAT REQUIREMENT
plant_load_kWh = thermal_losses_supply_kWh.sum(axis=1) * 2 + Q_demand_kWh_building.sum(
axis=1) - accumulated_head_loss_total_kW.values
plant_load_kWh.to_csv(locator.get_thermal_network_plant_heat_requirement_file(network_type, network_name),
header=['thermal_load_kW'], index=False)
# pressure losses per piping system
pressure_loss_supply_edge_kW.to_csv(
locator.get_thermal_network_pressure_losses_edges_file(network_type, network_name), index=False)
# pressure losses per substation
head_loss_substations_kW = head_loss_substations_kW.rename(columns=building_nodes_pairs)
head_loss_substations_kW.to_csv(locator.get_thermal_network_substation_ploss_file(network_type, network_name),
index=False)
# pumping needs losses total
pumping_energy_system_kWh = pd.DataFrame({"pressure_loss_supply_kW": accumulated_head_loss_supply_kW,
"pressure_loss_return_kW": accumulated_head_loss_return_kW,
"pressure_loss_substations_kW": accumulated_head_loss_substations_kW,
"pressure_loss_total_kW": accumulated_head_loss_total_kW})
pumping_energy_system_kWh.to_csv(
locator.get_network_energy_pumping_requirements_file(network_type, network_name), index=False)
# pumping needs losses total
temperatures_plant_C = pd.DataFrame({"temperature_supply_K": average_temperature_supply_K,
"temperature_return_K": average_temperature_return_K})
temperatures_plant_C.to_csv(locator.get_network_temperature_plant(network_type, network_name), index=False)
# thermal losses
thermal_losses_supply_kWh.to_csv(locator.get_network_thermal_loss_edges_file(network_type, network_name),
index=False)
thermal_losses_supply_Wperm.to_csv(locator.get_network_linear_thermal_loss_edges_file(network_type, network_name),
index=False)
# thermal losses total
accumulated_thermal_losses_supply_kWh = thermal_losses_supply_kWh.sum(axis=1)
accumulated_thermal_losses_return_kWh = thermal_losses_return_kWh.sum(axis=1)
accumulated_thermal_loss_total_kWh = accumulated_thermal_losses_supply_kWh + accumulated_thermal_losses_return_kWh
thermal_losses_total_kWh = pd.DataFrame({"thermal_loss_supply_kW": accumulated_thermal_losses_supply_kWh,
"thermal_loss_return_kW": accumulated_thermal_losses_return_kWh,
"thermal_loss_total_kW": accumulated_thermal_loss_total_kWh})
thermal_losses_total_kWh.to_csv(locator.get_network_total_thermal_loss_file(network_type, network_name),
index=False)
# return average temperature of supply at the substations
T_sup_K_nodes = T_sup_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_sup_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_sup_K_nodes[node] = average_year
T_sup_K_nodes.to_csv(locator.get_network_temperature_supply_nodes_file(network_type, network_name),
index=False)
# return average temperature of return at the substations
T_return_K_nodes = T_re_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_return_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_return_K_nodes[node] = average_year
T_return_K_nodes.to_csv(locator.get_network_temperature_return_nodes_file(network_type, network_name),
index=False)
# summary of edges used for the calculation
fields_edges = ['length_m', 'Pipe_DN', 'Type_mat', 'D_int_m']
edge_df[fields_edges].to_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
fields_nodes = ['Type', 'Building']
node_df[fields_nodes].to_csv(locator.get_thermal_network_node_types_csv_file(network_type, network_name))
# correct diameter of network and save to the shapefile
from cea.utilities.dbf import dataframe_to_dbf, dbf_to_dataframe
fields = ['length_m', 'Pipe_DN', 'Type_mat']
edge_df = edge_df[fields]
edge_df['name'] = edge_df.index.values
network_edges_df = dbf_to_dataframe(
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
network_edges_df = network_edges_df.merge(edge_df, left_on='Name', right_on='name', suffixes=('_x', ''))
network_edges_df = network_edges_df.drop(['Pipe_DN_x', 'Type_mat_x', 'name', 'length_m_x'], axis=1)
dataframe_to_dbf(network_edges_df,
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
def main(config):
"""
run the whole network summary routine
"""
start = time.time()
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
network_names = config.thermal_network.network_names
if len(network_names) == 0:
network_names = ['']
for network_name in network_names:
thermal_network_simplified(locator, config, network_name)
print("done.")
print(f"total time: {time.time() - start}")
if __name__ == '__main__':
main(cea.config.Configuration())
|
mit
|
wgapl/moose
|
gui/utils/YamlData.py
|
34
|
1524
|
#!/usr/bin/python
import sys, os, commands, time, re, copy
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
from GenSyntax import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class YamlData():
def __init__(self, qt_app, app_path, recache, use_cached_syntax):
self.qt_app = qt_app
self.app_path = app_path
self.use_cached_syntax = use_cached_syntax
self.gen_syntax = GenSyntax(qt_app, app_path, use_cached_syntax)
self.yaml_data = self.gen_syntax.GetSyntax(recache)
def recache(self, recache):
self.yaml_data = self.gen_syntax.GetSyntax(recache)
def recursiveYamlDataSearch(self, path, current_yaml):
if current_yaml['name'] == path:
return current_yaml
else:
if current_yaml['subblocks']:
for child in current_yaml['subblocks']:
yaml_data = self.recursiveYamlDataSearch(path, child)
if yaml_data: # Found it in a child!
return yaml_data
else: # No children.. stop recursion
return None
def findYamlEntry(self, path):
for yaml_it in self.yaml_data:
yaml_data = self.recursiveYamlDataSearch(path, yaml_it)
if yaml_data:
return yaml_data
# This means it wasn't found
return None
|
lgpl-2.1
|
0k/odoo
|
addons/stock_account/stock.py
|
6
|
13101
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_location_path(osv.osv):
_inherit = "stock.location.path"
_columns = {
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Status",),
}
_defaults = {
'invoice_state': '',
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
res = super(stock_location_path, self)._prepare_push_apply(cr, uid, rule, move, context=context)
res['invoice_state'] = rule.invoice_state or 'none'
return res
#----------------------------------------------------------
# Procurement Rule
#----------------------------------------------------------
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
_columns = {
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Status",),
}
_defaults = {
'invoice_state': '',
}
#----------------------------------------------------------
# Procurement Order
#----------------------------------------------------------
class procurement_order(osv.osv):
_inherit = "procurement.order"
_columns = {
'invoice_state': fields.selection([("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")
], "Invoice Control"),
}
def _run_move_create(self, cr, uid, procurement, context=None):
res = super(procurement_order, self)._run_move_create(cr, uid, procurement, context=context)
res.update({'invoice_state': procurement.rule_id.invoice_state or procurement.invoice_state or 'none'})
return res
_defaults = {
'invoice_state': ''
}
#----------------------------------------------------------
# Move
#----------------------------------------------------------
class stock_move(osv.osv):
_inherit = "stock.move"
_columns = {
'invoice_state': fields.selection([("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, track_visibility='onchange',
states={'draft': [('readonly', False)]}),
}
_defaults = {
'invoice_state': lambda *args, **argv: 'none'
}
def _get_master_data(self, cr, uid, move, company, context=None):
        ''' returns a tuple (browse_record(res.partner), ID(res.users), ID(res.currency)) '''
currency = company.currency_id.id
partner = move.picking_id and move.picking_id.partner_id
if partner:
code = self.get_code_from_locs(cr, uid, move, context=context)
if partner.property_product_pricelist and code == 'outgoing':
currency = partner.property_product_pricelist.currency_id.id
return partner, uid, currency
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
return self.pool.get('account.invoice.line').create(cr, uid, invoice_line_vals, context=context)
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
return move_line.price_unit
else:
# If partner given, search price in its sale pricelist
if move_line.partner_id and move_line.partner_id.property_product_pricelist:
pricelist_obj = self.pool.get("product.pricelist")
pricelist = move_line.partner_id.property_product_pricelist.id
price = pricelist_obj.price_get(cr, uid, [pricelist],
move_line.product_id.id, move_line.product_uom_qty, move_line.partner_id.id, {
'uom': move_line.product_uom.id,
'date': move_line.date,
})[pricelist]
if price:
return price
return move_line.product_id.list_price
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
fp_obj = self.pool.get('account.fiscal.position')
# Get account_id
if inv_type in ('out_invoice', 'out_refund'):
account_id = move.product_id.property_account_income.id
if not account_id:
account_id = move.product_id.categ_id.property_account_income_categ.id
else:
account_id = move.product_id.property_account_expense.id
if not account_id:
account_id = move.product_id.categ_id.property_account_expense_categ.id
fiscal_position = partner.property_account_position
account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
# set UoS if it's a sale and the picking doesn't have one
uos_id = move.product_uom.id
quantity = move.product_uom_qty
if move.product_uos:
uos_id = move.product_uos.id
quantity = move.product_uos_qty
return {
'name': move.name,
'account_id': account_id,
'product_id': move.product_id.id,
'uos_id': uos_id,
'quantity': quantity,
'price_unit': self._get_price_unit_invoice(cr, uid, move, inv_type),
'discount': 0.0,
'account_analytic_id': False,
}
#----------------------------------------------------------
# Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def __get_invoice_state(self, cr, uid, ids, name, arg, context=None):
result = {}
for pick in self.browse(cr, uid, ids, context=context):
result[pick.id] = 'none'
for move in pick.move_lines:
if move.invoice_state == 'invoiced':
result[pick.id] = 'invoiced'
elif move.invoice_state == '2binvoiced':
result[pick.id] = '2binvoiced'
break
return result
def __get_picking_move(self, cr, uid, ids, context={}):
res = []
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
if move.picking_id:
res.append(move.picking_id.id)
return res
def _set_inv_state(self, cr, uid, picking_id, name, value, arg, context=None):
pick = self.browse(cr, uid, picking_id, context=context)
moves = [x.id for x in pick.move_lines]
move_obj= self.pool.get("stock.move")
move_obj.write(cr, uid, moves, {'invoice_state': pick.invoice_state})
_columns = {
'invoice_state': fields.function(__get_invoice_state, type='selection', selection=[
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")
], string="Invoice Control", required=True,
fnct_inv = _set_inv_state,
store={
'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['state'], 10),
'stock.move': (__get_picking_move, ['picking_id', 'invoice_state'], 10),
},
),
}
_defaults = {
'invoice_state': lambda *args, **argv: 'none'
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        ''' This function simply creates the invoice from the given values. It is overridden in the delivery module to add the delivery costs.
'''
invoice_obj = self.pool.get('account.invoice')
return invoice_obj.create(cr, uid, vals, context=context)
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Gets the partner that will be invoiced
Note that this function is inherited in the sale and purchase modules
@param picking: object of the picking for which we are selecting the partner to invoice
@return: object of the partner to invoice
"""
return picking.partner_id and picking.partner_id.id
def action_invoice_create(self, cr, uid, ids, journal_id, group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type invoice to be created
@return: Ids of created invoices for the pickings
"""
context = context or {}
todo = {}
for picking in self.browse(cr, uid, ids, context=context):
partner = self._get_partner_to_invoice(cr, uid, picking, context)
#grouping is based on the invoiced partner
if group:
key = partner
else:
key = picking.id
for move in picking.move_lines:
if move.invoice_state == '2binvoiced':
if (move.state != 'cancel') and not move.scrapped:
todo.setdefault(key, [])
todo[key].append(move)
invoices = []
for moves in todo.values():
invoices += self._invoice_create_line(cr, uid, moves, journal_id, type, context=context)
return invoices
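    # Note on grouping (editorial comment, not in the original source): with
    # group=True every '2binvoiced' move that resolves to the same invoiced
    # partner shares one key in `todo`, so those pickings land on a single
    # invoice; with group=False the key is the picking id and each picking is
    # invoiced on its own. Cancelled and scrapped moves are never collected.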
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):
if context is None:
context = {}
partner, currency_id, company_id, user_id = key
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
return {
'origin': origin,
'date_invoice': context.get('date_inv', False),
'user_id': user_id,
'partner_id': partner.id,
'account_id': account_id,
'payment_term': payment_term,
'type': inv_type,
'fiscal_position': partner.property_account_position.id,
'company_id': company_id,
'currency_id': currency_id,
'journal_id': journal_id,
}
def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
invoice_obj = self.pool.get('account.invoice')
move_obj = self.pool.get('stock.move')
invoices = {}
for move in moves:
company = move.company_id
origin = move.picking_id.name
partner, user_id, currency_id = move_obj._get_master_data(cr, uid, move, company, context=context)
key = (partner, currency_id, company.id, user_id)
if key not in invoices:
# Get account and payment terms
invoice_vals = self._get_invoice_vals(cr, uid, key, inv_type, journal_id, origin, context=context)
invoice_id = self._create_invoice_from_picking(cr, uid, move.picking_id, invoice_vals, context=context)
invoices[key] = invoice_id
invoice_line_vals = move_obj._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
invoice_line_vals['invoice_id'] = invoices[key]
invoice_line_vals['origin'] = origin
move_obj._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
move_obj.write(cr, uid, move.id, {'invoice_state': 'invoiced'}, context=context)
invoice_obj.button_compute(cr, uid, invoices.values(), context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
return invoices.values()
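# Invoicing flow overview (editorial comment, not original code):
# action_invoice_create() collects billable moves per grouping key and passes
# them to _invoice_create_line(), which creates one invoice header per
# (partner, currency, company, user) key through _get_invoice_vals() and
# _create_invoice_from_picking(), adds one invoice line per move via
# stock.move._get_invoice_line_vals(), marks each move 'invoiced' and finally
# recomputes totals with account.invoice.button_compute().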
|
agpl-3.0
|
sakuramilk/sc02c_kernel_gb
|
tools/perf/scripts/python/check-perf-trace.py
|
948
|
2501
|
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
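# A typical way to exercise this script (editorial note; the exact commands
# depend on the perf version bundled with this kernel tree, so treat them as a
# hedged example rather than the canonical invocation):
#   perf record -a -e irq:softirq_entry -e kmem:kmalloc -- sleep 1
#   perf trace -s check-perf-trace.py    # newer perf uses `perf script -s ...`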
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
daviddupont69/CouchPotatoServer
|
couchpotato/core/downloaders/base.py
|
3
|
6076
|
from base64 import b32decode, b16encode
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import Provider
import random
import re
log = CPLog(__name__)
class Downloader(Provider):
protocol = []
http_time_between_calls = 0
torrent_sources = [
'http://torrage.com/torrent/%s.torrent',
'https://torcache.net/torrent/%s.torrent',
]
torrent_trackers = [
'http://tracker.publicbt.com/announce',
'udp://tracker.istole.it:80/announce',
'udp://fr33domtracker.h33t.com:3310/announce',
'http://tracker.istole.it/announce',
'http://tracker.ccc.de/announce',
'udp://tracker.publicbt.com:80/announce',
'udp://tracker.ccc.de:80/announce',
'http://exodus.desync.com/announce',
'http://exodus.desync.com:6969/announce',
'http://tracker.publichd.eu/announce',
'http://tracker.openbittorrent.com/announce',
]
def __init__(self):
addEvent('download', self._download)
addEvent('download.enabled', self._isEnabled)
addEvent('download.enabled_protocols', self.getEnabledProtocol)
addEvent('download.status', self._getAllDownloadStatus)
addEvent('download.remove_failed', self._removeFailed)
addEvent('download.pause', self._pause)
addEvent('download.process_complete', self._processComplete)
def getEnabledProtocol(self):
for download_protocol in self.protocol:
if self.isEnabled(manual = True, data = {'protocol': download_protocol}):
return self.protocol
return []
def _download(self, data = None, movie = None, manual = False, filedata = None):
if not movie: movie = {}
if not data: data = {}
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
def _getAllDownloadStatus(self):
if self.isDisabled(manual = True, data = {}):
return
return self.getAllDownloadStatus()
def getAllDownloadStatus(self):
return
def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if release_download and release_download.get('downloader') == self.getName():
if self.conf('delete_failed'):
return self.removeFailed(release_download)
return False
return
def removeFailed(self, release_download):
return
def _processComplete(self, release_download):
if self.isDisabled(manual = True, data = {}):
return
if release_download and release_download.get('downloader') == self.getName():
if self.conf('remove_complete', default = False):
return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False))
return False
return
def processComplete(self, release_download, delete_files):
return
def isCorrectProtocol(self, protocol):
is_correct = protocol in self.protocol
if not is_correct:
log.debug("Downloader doesn't support this protocol")
return is_correct
def magnetToTorrent(self, magnet_link):
torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
sources = self.torrent_sources
random.shuffle(sources)
for source in sources:
try:
filedata = self.urlopen(source % torrent_hash, headers = {'Referer': ''}, show_error = False)
if 'torcache' in filedata and 'file not found' in filedata.lower():
continue
return filedata
except:
log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source))
log.error('Failed converting magnet url to torrent: %s', torrent_hash)
return False
def downloadReturnId(self, download_id):
return {
'downloader': self.getName(),
'id': download_id
}
def isDisabled(self, manual = False, data = None):
if not data: data = {}
return not self.isEnabled(manual, data)
def _isEnabled(self, manual, data = None):
if not data: data = {}
if not self.isEnabled(manual, data):
return
return True
def isEnabled(self, manual = False, data = None):
if not data: data = {}
d_manual = self.conf('manual', default = False)
return super(Downloader, self).isEnabled() and \
(d_manual and manual or d_manual is False) and \
(not data or self.isCorrectProtocol(data.get('protocol')))
def _pause(self, release_download, pause = True):
if self.isDisabled(manual = True, data = {}):
return
if release_download and release_download.get('downloader') == self.getName():
self.pause(release_download, pause)
return True
return False
def pause(self, release_download, pause):
return
class ReleaseDownloadList(list):
provider = None
def __init__(self, provider, **kwargs):
self.provider = provider
self.kwargs = kwargs
super(ReleaseDownloadList, self).__init__()
def extend(self, results):
for r in results:
self.append(r)
def append(self, result):
new_result = self.fillResult(result)
super(ReleaseDownloadList, self).append(new_result)
def fillResult(self, result):
defaults = {
'id': 0,
'status': 'busy',
'downloader': self.provider.getName(),
'folder': '',
'files': '',
}
return mergeDicts(defaults, result)
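# --- Illustrative sketch (editorial addition, not part of CouchPotato): a
# minimal concrete downloader showing which hooks a Downloader subclass is
# expected to provide. The class name, protocol list and return values are
# assumptions made purely for demonstration.
class ExampleNullDownloader(Downloader):
    protocol = ['torrent', 'torrent_magnet']
    def download(self, data = None, movie = None, filedata = None):
        # A real implementation would hand `filedata` (or data.get('url')) to an
        # external client here; this sketch only reports a made-up client id.
        log.debug('ExampleNullDownloader.download called')
        return self.downloadReturnId('example-download-id')
    def getAllDownloadStatus(self):
        # Returning a ReleaseDownloadList lets fillResult() add default fields.
        return ReleaseDownloadList(self)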
|
gpl-3.0
|
rolandwz/pymisc
|
ustrader/strategy/oneTrader.py
|
2
|
7496
|
# -*- coding: utf-8 -*-
import datetime, time, csv, os
import numpy as np
from utils.db import SqliteDB
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from utils.rwlogging import balLogger as logb
from trader import Trader
from indicator import ma, macd, bolling, rsi, kdj
from strategy.pool import StrategyPool
mas = emas = smas = lwmas = std = prices = None
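# Editorial note (inferred from the code below): each element of `in_prices` is
# expected to be a dict carrying at least 'close' (used to build the MA and
# rolling-std series) plus 'dt' and 'rmb' (forwarded to the Trader as the order
# timestamp and price).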
def runStrategy(in_prices):
global mas, emas, smas, lwmas, std, prices
log.debug('beginning one strategy ...')
prices = in_prices
ps = [p['close'] for p in prices]
std = [0] * 51
l = len(prices)
for period in range(20, 21):
std[period] = [0] * l
for i in range(period - 1, l):
std[period][i] = round(np.std(ps[i-period+1 : i+1], dtype=np.float64, ddof=0), 3)
mas = [0] * 121
emas = [0] * 121
smas = [0] * 121
lwmas = [0] * 121
for period in range(2, 121):
mas[period] = ma.calc_ma(ps, period)
emas[period] = ma.calc_ema(ps, period)
smas[period] = ma.calc_sma(ps, period)
lwmas[period] = ma.calc_lwma(ps, period)
#20_0.1_0.2_SMA_20_SMA_34_LWMA_40_SMA_20_SMA_34_LWMA_120_MA_20_SMA_34_LWMA_120,196,203,134,26371.25
pool = StrategyPool(100)
t = doTrade(pool, 20, 0.1, 0.2, 'SMA', 20, 'SMA', 34, 'LWMA', 40, 'SMA', 20, 'SMA', 34, 'LWMA', 120, 'MA', 20, 'SMA', 34, 'LWMA', 120)
pool.showStrategies()
return
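	# NOTE (editorial): the early return above makes the parameter sweep below
	# dead code in this revision; it is kept as the author's exploration of the
	# MA-type/period grid and only runs if that return is removed.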
log.debug('running first strategy ...')
starttime = time.time()
matypes = ['MA', 'EMA', 'SMA', 'LWMA']
#farr = [2, 3, 4, 5, 6, 7, ]
#s1arr = [4, 6, 8, 10, 12, 14, 16, 18, 20, ]
#s2arr = [0, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, ]
#farr = [20,]
#s1arr = [40, ]
#s2arr = [0, ]
farr = range(20, 21)[::3]
s1arr = range(4, 81)[::6]
s2arr = range(0, 121)[::10]
stdPeriod, stdGuage1, stdGuage2 = 20, 0.1, 0.2
pool = StrategyPool(50)
poola = StrategyPool(5)
poolb = StrategyPool(5)
poolc = StrategyPool(5)
for no in ['A', 'B', 'C']:
for ft, f in [(matype, period) for matype in matypes for period in farr]:
for s1t, s1 in [(matype, period) for matype in matypes for period in s1arr]:
if s1 < f: continue
elapsed = long(time.time() - starttime)
log.debug('== ' + str(elapsed) + ',' + ft + '_' + str(f) + ',' + s1t + '_' + str(s1) + ' ==')
for s2t, s2 in [(matype, period) for matype in matypes for period in s2arr]:
if s2 != 0 and s2 <= s1: continue
if s2 == 0 and (s2t == 'EMA' or s2t == 'SMA' or s2t == 'LWMA'): continue
if no == 'A':
doTrade(poola, stdPeriod, stdGuage1, stdGuage2, ft, f, s1t, s1, s2t, s2, '', 0, '', 0, '', 0, '', 0, '', 0, '', 0)
elif no == 'B':
doTrade(poolb, stdPeriod, stdGuage1, stdGuage2, '', 0, '', 0, '', 0, ft, f, s1t, s1, s2t, s2, '', 0, '', 0, '', 0)
elif no == 'C':
doTrade(poolc, stdPeriod, stdGuage1, stdGuage2, '', 0, '', 0, '', 0, '', 0, '', 0, '', 0, ft, f, s1t, s1, s2t, s2)
elapsed = long(time.time() - starttime)
log.info('find ' + no + ' time: ' + str(elapsed) + ' ')
for i in range(5):
sa = poola.strategies[i]
sb = poolb.strategies[i]
sc = poolc.strategies[i]
t = doTrade(pool, stdPeriod, stdGuage1, stdGuage2, sa[0].args[0], sa[0].args[1], sa[0].args[2], sa[0].args[3], sa[0].args[4], sa[0].args[5], sb[0].args[6], sb[0].args[7], sb[0].args[8], sb[0].args[9], sb[0].args[10], sb[0].args[11], sc[0].args[12], sc[0].args[13], sc[0].args[14], sc[0].args[15], sc[0].args[16], sc[0].args[17])
#t.generateGraph()
pool.showStrategies()
def doTrade(pool, stdPeriod, stdGuage1, stdGuage2, aft, af, as1t, as1, as2t, as2, bft, bf, bs1t, bs1, bs2t, bs2, cft, cf, cs1t, cs1, cs2t, cs2):
global std, prices
sname = str(stdPeriod) + '_' + str(stdGuage1) + '_' + str(stdGuage2)
sname += '_' + aft + '_' + str(af) + '_' + as1t + '_' + str(as1) + '_' + as2t + '_' + str(as2)
sname += '_' + bft + '_' + str(bf) + '_' + bs1t + '_' + str(bs1) + '_' + bs2t + '_' + str(bs2)
sname += '_' + cft + '_' + str(cf) + '_' + cs1t + '_' + str(cs1) + '_' + cs2t + '_' + str(cs2)
afma, as1ma, as2ma = getMas(aft, af), getMas(as1t, as1), getMas(as2t, as2)
bfma, bs1ma, bs2ma = getMas(bft, bf), getMas(bs1t, bs1), getMas(bs2t, bs2)
cfma, cs1ma, cs2ma = getMas(cft, cf), getMas(cs1t, cs1), getMas(cs2t, cs2)
front = max(as1, as2, bs1, bs2, cs1, cs2)
t = Trader(sname)
t.args = [aft, af, as1t, as1, as2t, as2, bft, bf, bs1t, bs1, bs2t, bs2, cft, cf, cs1t, cs1, cs2t, cs2]
for i in range(front, len(prices)):
price = prices[i]
if std[stdPeriod][i] > stdGuage2:
t.switchActiveCounter(2, price['dt'], price['rmb'])
elif std[stdPeriod][i] > stdGuage1:
t.switchActiveCounter(1, price['dt'], price['rmb'])
else:
t.switchActiveCounter(3, price['dt'], price['rmb'])
for cntNo in range(3):
if cntNo == 0: fma, s1, s1ma, s2, s2ma = afma, as1, as1ma, as2, as2ma
if cntNo == 1: fma, s1, s1ma, s2, s2ma = bfma, bs1, bs1ma, bs2, bs2ma
if cntNo == 2: fma, s1, s1ma, s2, s2ma = cfma, cs1, cs1ma, cs2, cs2ma
if s1 == 0 and s2 == 0: continue
volume = 0
notes = ''
if s1 > 0 and fma[i - 1] <= s1ma[i - 1] and fma[i] > s1ma[i]:
notes += 'f>s1;' + str(fma[i - 1]) + ';' + str(s1ma[i - 1]) + ';' + str(fma[i]) + ';' + str(s1ma[i]) + ';'
volume += 1
if s1 > 0 and fma[i - 1] >= s1ma[i - 1] and fma[i] < s1ma[i]:
notes += 'f<s1;' + str(fma[i - 1]) + ';' + str(s1ma[i - 1]) + ';' + str(fma[i]) + ';' + str(s1ma[i]) + ';'
volume -= 1
if s2 > 0 and fma[i - 1] <= s2ma[i - 1] and fma[i] > s2ma[i]:
notes += 'f>s2;' + str(fma[i - 1]) + ';' + str(s2ma[i - 1]) + ';' + str(fma[i]) + ';' + str(s2ma[i]) + ';'
volume += 1
if s2 > 0 and fma[i - 1] >= s2ma[i - 1] and fma[i] < s2ma[i]:
notes += 'f<s2;' + str(fma[i - 1]) + ';' + str(s2ma[i - 1]) + ';' + str(fma[i]) + ';' + str(s2ma[i]) + ';'
volume -= 1
t.processOrder(price['dt'], price['rmb'], volume, cntNo=cntNo, notes=notes)
pool.estimate(t)
return t
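# Summary of doTrade() above (editorial comment): the Trader runs three
# independent MA-crossover setups (cntNo 0..2, configured by the a*/b*/c*
# arguments), and the rolling stdPeriod-bar standard deviation of closes picks
# the active counter -- switchActiveCounter is called with 2, 1 or 3 depending
# on whether the std is above stdGuage2, above stdGuage1, or below both. Within
# a setup, volume is increased when the fast MA crosses above s1 or s2 and
# decreased when it crosses back below, then handed to processOrder().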
def getMas(matype, period):
global mas, emas, smas, lwmas
if matype == 'MA':
return mas[period]
elif matype == 'EMA':
return emas[period]
elif matype == 'SMA':
return smas[period]
elif matype == 'LWMA':
return lwmas[period]
else:
return None
def runStrategy_2(in_prices):
global mas, emas, smas, std, prices
log.debug('beginning first strategy ...')
prices = in_prices
ps = [p['close'] for p in prices]
std = [0] * 51
l = len(prices)
for period in range(2, 51):
std[period] = [0] * l
for i in range(period - 1, l):
std[period][i] = round(np.std(ps[i-period+1 : i+1], dtype=np.float64, ddof=0), 3)
mas = [0] * 61
emas = [0] * 61
smas = [0] * 61
for period in range(2, 61):
mas[period] = ma.calc_ma(ps, period)
emas[period] = ma.calc_ema(ps, period)
smas[period] = ma.calc_sma(ps, period)
log.debug('running first strategy ...')
starttime = datetime.datetime.now()
strat_as = [
['MA',7,'SMA',10,'SMA',12],
['MA',7,'SMA',14,'EMA',33],
['MA',7,'SMA',16,'EMA',27],
]
strat_bs = [
['SMA',7,'MA',12,'MA',12 ],
['SMA',7,'MA',12,'MA',36 ],
['MA',7,'SMA',14,'EMA',33],
]
pool = StrategyPool(100)
for stdPeriod in [25]:
stdGuage = 1.3
while stdGuage <= 1.3:
elapsed = (datetime.datetime.now() - starttime).seconds
log.debug('== ' + str(elapsed) + ',' + str(stdPeriod) + ',' + str(stdGuage) + ' ==')
for sa in strat_as:
for sb in strat_bs:
doTrade(pool, stdPeriod, stdGuage, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5], sb[0], sb[1], sb[2], sb[3], sb[4], sb[5])
stdGuage += 0.02
pool.showStrategies()
return
|
mit
|
quiqueporta/django-admin-dialog
|
django_admin_dialog/migrations/0001_initial.py
|
1
|
1234
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='DjangoAdminDialog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=255)),
('element_id', models.CharField(max_length=255)),
('title', models.CharField(max_length=255, blank=True)),
('body', models.TextField()),
('active', models.BooleanField(default=True)),
('width', models.SmallIntegerField(default=300, blank=True)),
('max_height', models.SmallIntegerField(default=600, blank=True)),
],
options={
'verbose_name': 'DjangoAdminDialog',
'verbose_name_plural': 'DjangoAdminDialogs',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='djangoadmindialog',
unique_together=set([('url', 'element_id')]),
),
]
|
gpl-2.0
|