repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Proteus-tech/nikola | nikola/plugins/compile/wiki.py | 1 | 2555 | # -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on CreoleWiki."""
import codecs
import os
try:
from creole import Parser
from creole.html_emitter import HtmlEmitter
creole = True
except ImportError:
creole = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing
class CompileWiki(PageCompiler):
"""Compile CreoleWiki into HTML."""
name = "wiki"
def compile_html(self, source, dest, is_two_file=True):
if creole is None:
req_missing(['creole'], 'build this site (compile CreoleWiki)')
makedirs(os.path.dirname(dest))
with codecs.open(dest, "w+", "utf8") as out_file:
with codecs.open(source, "r", "utf8") as in_file:
data = in_file.read()
document = Parser(data).parse()
output = HtmlEmitter(document).emit()
out_file.write(output)
def create_post(self, path, onefile=False, **kw):
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if onefile:
raise Exception('There are no comments in CreoleWiki markup, so '
'one-file format is not possible, use the -2 '
'option.')
with codecs.open(path, "wb+", "utf8") as fd:
fd.write("Write your post here.")
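# A minimal, self-contained sketch (added for illustration, not part of the
# original plugin) of the creole -> HTML step that compile_html() performs,
# assuming the optional python-creole dependency is installed; the sample
# wiki text below is made up.
if __name__ == "__main__":
    if creole is not None:
        sample = u"= Title =\n\nSome **bold** CreoleWiki text."
        document = Parser(sample).parse()
        print(HtmlEmitter(document).emit())
    else:
        print("python-creole is not installed; cannot render the sample.")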
| mit |
magsilva/pae-helper | examples/tr1/3309267/corrigido-v1.py | 1 | 1686 | Alecio Elias Vitiello
Maikel Thiago Favarin Jacob
CONTINUATION OF EXERCISE LIST 2
# TOTAL ERROR 1.1: Not done.
# TOTAL ERROR 1.2: Not done.
1.3)
def isBeetween(x,y,z):
if (y<=x) and (x<=z):
return 1
else:
return 0
1.4)
def inversa(palavra):
for i in range(1,len(palavra)+1):
print palavra[-i]
1.5)
def ocorrencia(palavra,letra):
n=0
for i in range(0,len(palavra)):
if palavra[i] == letra:
n = n+1
return n
1.6)
def ocorrencia2(frase,palavra):
auxiliar=0
contador=0
for i in range(0,len(frase)-len(palavra)):
for k in range(0,len(palavra)-1):
if frase[i+k] == palavra[i+k]:
auxiliar = 0
else:
auxiliar = 1
if auxiliar == 0:
contador = contador + 1
return contador
# TOTAL ERROR 1.6: The code raises an error when executed.
1.7)
def funcao():
for i in range(1000,9999):
numstring = str(i)
dezena1str[0] = numstring[0]
dezena1str[1] = numstring[1]
dezena2str[0] = numstring[2]
dezena2str[1] = numstring[3]
dezena1num = int(dezena1str)
dezena2num = int (dezena2str)
if sqrt(i) = dezena1num + dezena2num:
print i
# TOTAL ERROR 1.7: The code raises a syntax error when executed.
1.8)
def subnumeros(p,q):
pstr = str(p)
qstr = str(q)
auxiliar = 0
for i in range(0,len(qstr)-1):
for k in range(0,len(pstr)-1):
if qstr(i+k) == pstr(i+k):
auxiliar = 0
else:
auxiliar = -1
if auxiliar = 0:
print "p é subnumero de q"
# TOTAL ERROR 1.8: The code raises a syntax error when executed.
# TOTAL ERROR 1.9: The exercise was not done.
| gpl-2.0 |
alshedivat/tensorflow | tensorflow/python/kernel_tests/pooling_ops_3d_test.py | 5 | 15370 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCDHW", True)]
return test_configs
# TODO(mjanusz): Add microbenchmarks for 3d pooling.
class PoolingTest(test.TestCase):
def _VerifyOneTest(self, pool_func, input_sizes, window, strides, padding,
data_format, expected, use_gpu):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called: co.MaxPool, co.AvgPool.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether to run ops on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.cached_session(use_gpu=use_gpu) as sess:
t = constant_op.constant(x, shape=input_sizes)
window = [1] + list(window) + [1]
strides = [1] + list(strides) + [1]
if data_format == "NCDHW":
t = test_util.NHWCToNCHW(t)
window = test_util.NHWCToNCHW(window)
strides = test_util.NHWCToNCHW(strides)
t = pool_func(
t,
ksize=window,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCDHW":
t = test_util.NCHWToNHWC(t)
vals = sess.run(t)
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def _VerifyValues(self, pool_func, input_sizes, window, strides,
padding, expected):
for data_format, use_gpu in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, window, strides, padding,
data_format, expected, use_gpu)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
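    # Hand-check (comment added for illustration, not in the original test): with
    # the NDHWC input 1..81 reshaped to (1, 3, 3, 3, 3), VALID padding and stride 2
    # leave a single 2x2x2 window, x[0, :2, :2, :2, :], whose per-channel mean is
    # [20.5, 21.5, 22.5] -- the expected_output above.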
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=(1, 2, 3),
strides=(2, 3, 1),
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=(1, 2, 2),
strides=(1, 2, 2),
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
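    # Hand-check (comment added for illustration, not in the original test): a
    # 1x1x1 window with stride 2 over a 3x3x3 input holding 1..27 simply samples
    # the corner positions (indices {0, 2} in each dimension), i.e. the values
    # 1, 3, 7, 9, 19, 21, 25 and 27 -- identical for max and average pooling.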
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _ConstructAndTestGradientForConfig(self,
pool_func,
input_sizes,
output_sizes,
window,
strides,
padding,
data_format,
use_gpu):
"""Verifies the gradients of a pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
data_format: Data format string.
use_gpu: Whether to run on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.arange(1, total_size + 1, dtype=np.float32)
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
err_g_margin = 1e-3
err_gg_margin = 1.5e-2
if pool_func == nn_ops.avg_pool3d:
func_name = "avg_pool3d"
x_init_value = None
else:
x_init_value = np.asfarray(np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool3d"
ksize = [1, window[0], window[1], window[2], 1]
strides = [1, strides[0], strides[1], strides[2], 1]
t = input_tensor
if data_format == "NCDHW":
ksize = test_util.NHWCToNCHW(ksize)
strides = test_util.NHWCToNCHW(strides)
t = test_util.NHWCToNCHW(t)
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err_g = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
err_gg = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
print("%s gradient error = " % func_name, err_g)
self.assertLess(err_g, err_g_margin)
print("%s second-order gradient error = " % func_name, err_gg)
self.assertLess(err_gg, err_gg_margin)
def _ConstructAndTestGradient(self,
pool_func,
**kwargs):
"""Runs _ConstructAndTestGradientForConfig for all tests configurations."""
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(pool_func,
data_format=data_format,
use_gpu=use_gpu,
**kwargs)
def testMaxPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 3, 3, 3, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 2, 3, 4, 2],
output_sizes=[1, 1, 2, 3, 2],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding2_1_7_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 7, 1],
output_sizes=[1, 2, 1, 6, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testMaxPoolGradValidPadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[2, 2, 2, 2, 1],
output_sizes=[2, 1, 1, 1, 1],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
def testMaxPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 3, 2, 4, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
def testMaxPoolGradSamePadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 2, 1, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="SAME")
def testMaxPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 2, 4, 1],
output_sizes=[1, 3, 2, 4, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
def testMaxPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 5, 2, 4, 2],
output_sizes=[1, 3, 1, 2, 2],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
def testMaxPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.max_pool3d,
input_sizes=[1, 3, 4, 2, 1],
output_sizes=[1, 3, 4, 2, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradValidPadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 3, 3, 3, 1],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="VALID")
def testAvgPoolGradValidPadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 2],
output_sizes=[1, 2, 2, 2, 2],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[2, 2, 2, 2, 2],
output_sizes=[2, 1, 1, 1, 2],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 2, 4, 2],
output_sizes=[1, 3, 2, 4, 2],
window=(1, 1, 1),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradSamePadding1_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 2, 4, 2],
output_sizes=[1, 2, 1, 2, 2],
window=(1, 1, 1),
strides=(2, 2, 2),
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 2, 1],
output_sizes=[1, 2, 2, 2, 1],
window=(2, 2, 2),
strides=(1, 1, 1),
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 2, 4, 1],
output_sizes=[1, 3, 1, 2, 1],
window=(2, 2, 2),
strides=(2, 2, 2),
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._ConstructAndTestGradient(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 6, 2, 1],
output_sizes=[1, 3, 6, 2, 1],
window=(3, 3, 3),
strides=(1, 1, 1),
padding="SAME")
if __name__ == "__main__":
test.main()
| apache-2.0 |
jonobrien/School_Backups | cs1-python/Labs/week 8/hashtable.py | 1 | 9784 | """
Edited by: Jon O'Brien
Due date: 10/29/13
lab8 - hash tables
This program is used by word_count.py to take an input text file and report the
number of unique words, the total number of words, the words that appeared most
often, and the count for each word. A hash table stores the values from the text
file; the table is resized and rehashed when it is nearly full, as detected by
functions that track its capacity and load. A class holds the entries of the
table, and different hash functions were tested to measure how evenly a given
hash function distributes entries while the table is built. Each key-value pair
is checked for already being in the table, located by its hashcode, and put into
the table, which is rehashed when necessary.
"""
"""
file: hashtable.py
language: python3
author: [email protected] Sean Strout
author: [email protected] James Heliotis
author: [email protected] Arthur Nunes-Harwitt
author: [email protected] Jeremy Brown
description: open addressing Hash Table for CS 141 Lecture
"""
import copy
class HashTable( ):
"""
The HashTable data structure contains a collection of values
where each value is located by a hashable key.
No two values may have the same key, but more than one
key may have the same value.
"""
__slots__ = ( "table", "size" )
def mkHashTable(capacity=100):
"""
    Helper (factory) function for the HashTable class. It takes the desired
    capacity as a parameter, creates a HashTable instance whose table is a list
    of `capacity` empty lists (one chain per bucket), sets its size to zero, and
    returns it. Tables created here are later grown by rehash() when necessary.
"""
hTable = HashTable()
hTable.table = [list() for _ in range(capacity)]
hTable.size = 0
return hTable
def HashTableToStr(hashtable):
"""
    Takes the hash table as a parameter and builds a printable string
    representation of it: every bucket index is listed on its own line together
    with its contents, formatted with the EntryToStr() helper defined below.
"""
result = ""
for i in range( len( hashtable.table ) ):
if i != None:
result += str( i ) + ": "
result += EntryToStr( hashtable.table[i] ) + "\n"
return result
class _Entry( ):
"""
    A record class used to hold a single key-value pair. Instances are created
    by the mkEntry() helper below and carry only the slots "key" and "value".
"""
__slots__ = ( "key", "value" )
def EntryToStr(entry):
"""
    Helper function for the _Entry class: takes an entry as a parameter and
    returns its key-value pair formatted as a "(key, value)" string.
"""
return "(" + str( entry.key ) + ", " + str( entry.value ) + ")"
def mkEntry(key, value):
"""
    Helper function for the _Entry class: takes a key and a value as parameters,
    builds an _Entry instance holding them, and returns it for use by the other
    functions.
"""
aEntry = _Entry()
aEntry.key = key
aEntry.value = value
return aEntry
def given_hash( val, n ):
"""
    Computes a hash of the string val that lies in [0 ... n). This hashing
    function was supplied with the original file; the hashcodes it produces are
    used as bucket indices for key-value pairs, both when filling the table and
    when rehashing it.
"""
hashcode = hash( val ) % n
# hashcode = 0
# hashcode = len(val) % n
return hashcode
def hash_function(val, n):
"""
    An alternative way to compute hashcodes. Like given_hash() above, it takes a
    value and n as parameters and produces hashcodes used as bucket indices for
    key-value pairs. This function in particular sums the ordinal value of each
    letter and then takes that sum modulo n to obtain the hashcode.
"""
number = 0
for letter in val:
number = number + ord(letter) #turn the letter into a number
return number%n
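    # Worked example (comment added for illustration, not part of the original
    # lab): hash_function("abc", 7) sums ord('a') + ord('b') + ord('c')
    # = 97 + 98 + 99 = 294 and returns 294 % 7 = 0, so the key "abc" would be
    # chained into bucket 0 of a 7-slot table.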
def keys( hTable ):
"""
    Returns a list of the keys stored in the given hash table. It takes the hash
    table as a parameter, walks over every bucket, skips empty buckets, and
    collects the key of each entry it finds into the result list.
"""
result = []
    for item in hTable.table:
if item != []:
for entry in item:
result.append( entry.key )
return result
def contains( hTable, key ):
"""
    Returns True iff hTable has an entry with the given key. It takes the hash
    table and the key as parameters, hashes the key to its bucket index, scans
    the entries chained at that index, and returns False if none of them match.
"""
index = hash_function( key, len( hTable.table ) )
lst = hTable.table[ index ]
for i in lst:
if i.key == key:
return True
return False
def put( hTable, key, value ):
"""
Using the given hash table, set the given key to the
given value. If the key already exists, the given value
will replace the previous one already in the table.
If the table is full, an Exception is raised.
    The load factor is checked here to decide whether rehashing is necessary: if
    the load threshold is met or exceeded, the table is rehashed before the new
    key-value pair is inserted, and this repeats as further pairs are added.
"""
ratio = load( hTable )
if ratio >= .75:
rehash( hTable)
index = hash_function( key, len( hTable.table ) )
if hTable.table[ index ] == []:
hTable.table[ index ] = [ mkEntry(key, value) ]
hTable.size += 1
else:
for i in range( len( hTable.table[ index ] ) ):
if hTable.table[ index ][ i ].key == key:
hTable.table[ index ][ i ].value = value
return True
hTable.table[ index ].append( mkEntry(key, value))
return True
def get( hTable, key ):
"""
Return the value associated with the given key in
the given hash table.
Precondition: contains(hTable, key)
    This function takes the hash table and the key as parameters. It hashes the
    key to its bucket index; if that bucket is empty, an exception is raised
    because the key cannot be in the table. Otherwise the chain at that index is
    scanned and the matching entry's value is returned. If no entry in the chain
    has the key, an exception is raised as well.
"""
index = hash_function( key, len( hTable.table ) )
if hTable.table[ index ] == []:
raise Exception( "Hash table does not contain key." )
else:
lst = hTable.table[ index ]
for i in lst:
if i.key == key:
return i.value
raise Exception( "Hash table does not contain key." )
def imbalance( hTable ):
"""
Compute average length of all non-empty chains
    This function takes as a parameter the hash table that the program uses to
    store the key-value pairs. imbalance() measures the effectiveness of the
    hashing function: the lower the number (the closer to zero), the better.
"""
numOfChains = 0
total = 0
for i in range( len( hTable.table ) ):
if hTable.table[ i ] != []:
total += len( hTable.table[ i ] )
numOfChains += 1
avg = (total / numOfChains)-1
return avg
def load( hTable ):
"""
    Checks the ratio of items in the table to the table size, i.e. the load
    factor used to decide when to rehash. It takes the hash table as a
    parameter, iterates over every bucket accumulating the number of entries,
    and divides that total by the number of buckets. The result is used in put()
    to trigger a rehash when the table becomes too full and needs resizing.
"""
total = 0
size = len( hTable.table )
for i in range( size ):
total += len( hTable.table[ i ] )
return total/size
def rehash( hTable):
"""
    Performs a rehash whenever the table starts to fill up, as determined by the
    load factor. It takes the hash table as a parameter, allocates a new table
    of roughly twice the size (2 * old size + 1 buckets), and moves every entry
    into it. put() is reused here so that each key-value pair is hashed again
    and lands in its correct bucket of the enlarged table, whose contents then
    replace the old table.
"""
newN = ( 2 * len( hTable.table ) ) + 1
newTable = mkHashTable( newN )
for i in range( len( hTable.table ) ):
for item in hTable.table[ i ]:
myKey = item.key
myValue = item.value
put(newTable, myKey, myValue)
hTable.table = newTable.table
hTable.size = newTable.size
return newTable
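# A minimal usage sketch (added for illustration, not part of the original lab)
# of the API documented above: build a table, insert a few word counts, and read
# them back.  The words and counts here are made up purely for illustration.
if __name__ == "__main__":
    demo = mkHashTable(5)
    for word in ["the", "cat", "sat", "on", "the", "mat"]:
        if contains(demo, word):
            put(demo, word, get(demo, word) + 1)
        else:
            put(demo, word, 1)
    print(keys(demo))            # the distinct words
    print(get(demo, "the"))      # -> 2
    print(imbalance(demo))       # average non-empty chain length minus one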
| gpl-3.0 |
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_math.py | 34 | 6550 | # Python test set -- math module
# XXXX Should not do tests around zero only
from test.test_support import TestFailed, verbose
seps='1e-05'
eps = eval(seps)
print 'math module, testing with eps', seps
import math
def testit(name, value, expected):
if abs(value-expected) > eps:
raise TestFailed, '%s returned %f, expected %f'%\
(name, value, expected)
print 'constants'
testit('pi', math.pi, 3.1415926)
testit('e', math.e, 2.7182818)
print 'acos'
testit('acos(-1)', math.acos(-1), math.pi)
testit('acos(0)', math.acos(0), math.pi/2)
testit('acos(1)', math.acos(1), 0)
print 'asin'
testit('asin(-1)', math.asin(-1), -math.pi/2)
testit('asin(0)', math.asin(0), 0)
testit('asin(1)', math.asin(1), math.pi/2)
print 'atan'
testit('atan(-1)', math.atan(-1), -math.pi/4)
testit('atan(0)', math.atan(0), 0)
testit('atan(1)', math.atan(1), math.pi/4)
print 'atan2'
testit('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
testit('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
testit('atan2(0, 1)', math.atan2(0, 1), 0)
testit('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
testit('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
print 'ceil'
testit('ceil(0.5)', math.ceil(0.5), 1)
testit('ceil(1.0)', math.ceil(1.0), 1)
testit('ceil(1.5)', math.ceil(1.5), 2)
testit('ceil(-0.5)', math.ceil(-0.5), 0)
testit('ceil(-1.0)', math.ceil(-1.0), -1)
testit('ceil(-1.5)', math.ceil(-1.5), -1)
print 'cos'
testit('cos(-pi/2)', math.cos(-math.pi/2), 0)
testit('cos(0)', math.cos(0), 1)
testit('cos(pi/2)', math.cos(math.pi/2), 0)
testit('cos(pi)', math.cos(math.pi), -1)
print 'cosh'
testit('cosh(0)', math.cosh(0), 1)
testit('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
print 'degrees'
testit('degrees(pi)', math.degrees(math.pi), 180.0)
testit('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
testit('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
print 'exp'
testit('exp(-1)', math.exp(-1), 1/math.e)
testit('exp(0)', math.exp(0), 1)
testit('exp(1)', math.exp(1), math.e)
print 'fabs'
testit('fabs(-1)', math.fabs(-1), 1)
testit('fabs(0)', math.fabs(0), 0)
testit('fabs(1)', math.fabs(1), 1)
print 'floor'
testit('floor(0.5)', math.floor(0.5), 0)
testit('floor(1.0)', math.floor(1.0), 1)
testit('floor(1.5)', math.floor(1.5), 1)
testit('floor(-0.5)', math.floor(-0.5), -1)
testit('floor(-1.0)', math.floor(-1.0), -1)
testit('floor(-1.5)', math.floor(-1.5), -2)
print 'fmod'
testit('fmod(10,1)', math.fmod(10,1), 0)
testit('fmod(10,0.5)', math.fmod(10,0.5), 0)
testit('fmod(10,1.5)', math.fmod(10,1.5), 1)
testit('fmod(-10,1)', math.fmod(-10,1), 0)
testit('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
testit('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
print 'frexp'
def testfrexp(name, (mant, exp), (emant, eexp)):
if abs(mant-emant) > eps or exp != eexp:
raise TestFailed, '%s returned %r, expected %r'%\
(name, (mant, exp), (emant,eexp))
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
print 'hypot'
testit('hypot(0,0)', math.hypot(0,0), 0)
testit('hypot(3,4)', math.hypot(3,4), 5)
print 'ldexp'
testit('ldexp(0,1)', math.ldexp(0,1), 0)
testit('ldexp(1,1)', math.ldexp(1,1), 2)
testit('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
testit('ldexp(-1,1)', math.ldexp(-1,1), -2)
print 'log'
testit('log(1/e)', math.log(1/math.e), -1)
testit('log(1)', math.log(1), 0)
testit('log(e)', math.log(math.e), 1)
testit('log(32,2)', math.log(32,2), 5)
testit('log(10**40, 10)', math.log(10**40, 10), 40)
testit('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
print 'log10'
testit('log10(0.1)', math.log10(0.1), -1)
testit('log10(1)', math.log10(1), 0)
testit('log10(10)', math.log10(10), 1)
print 'modf'
def testmodf(name, (v1, v2), (e1, e2)):
if abs(v1-e1) > eps or abs(v2-e2):
raise TestFailed, '%s returned %r, expected %r'%\
(name, (v1,v2), (e1,e2))
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
print 'pow'
testit('pow(0,1)', math.pow(0,1), 0)
testit('pow(1,0)', math.pow(1,0), 1)
testit('pow(2,1)', math.pow(2,1), 2)
testit('pow(2,-1)', math.pow(2,-1), 0.5)
print 'radians'
testit('radians(180)', math.radians(180), math.pi)
testit('radians(90)', math.radians(90), math.pi/2)
testit('radians(-45)', math.radians(-45), -math.pi/4)
print 'sin'
testit('sin(0)', math.sin(0), 0)
testit('sin(pi/2)', math.sin(math.pi/2), 1)
testit('sin(-pi/2)', math.sin(-math.pi/2), -1)
print 'sinh'
testit('sinh(0)', math.sinh(0), 0)
testit('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
testit('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
print 'sqrt'
testit('sqrt(0)', math.sqrt(0), 0)
testit('sqrt(1)', math.sqrt(1), 1)
testit('sqrt(4)', math.sqrt(4), 2)
print 'tan'
testit('tan(0)', math.tan(0), 0)
testit('tan(pi/4)', math.tan(math.pi/4), 1)
testit('tan(-pi/4)', math.tan(-math.pi/4), -1)
print 'tanh'
testit('tanh(0)', math.tanh(0), 0)
testit('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
def test_exceptions():
print 'exceptions'
try:
x = math.exp(-1000000000)
except:
# mathmodule.c is failing to weed out underflows from libm, or
# we've got an fp format with huge dynamic range
raise TestFailed("underflowing exp() should not have raised "
"an exception")
if x != 0:
raise TestFailed("underflowing exp() should have returned 0")
# If this fails, probably using a strict IEEE-754 conforming libm, and x
# is +Inf afterwards. But Python wants overflows detected by default.
try:
x = math.exp(1000000000)
except OverflowError:
pass
else:
raise TestFailed("overflowing exp() didn't trigger OverflowError")
# If this fails, it could be a puzzle. One odd possibility is that
# mathmodule.c's macros are getting confused while comparing
# Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
# as a result (and so raising OverflowError instead).
try:
x = math.sqrt(-1.0)
except ValueError:
pass
else:
raise TestFailed("sqrt(-1) didn't raise ValueError")
if verbose:
test_exceptions()
| epl-1.0 |
mzhaom/grpc | src/python/src/grpc/framework/face/_service.py | 41 | 7187 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Behaviors for servicing RPCs."""
# base_interfaces and interfaces are referenced from specification in this
# module.
from grpc.framework.base import interfaces as base_interfaces # pylint: disable=unused-import
from grpc.framework.face import _control
from grpc.framework.face import exceptions
from grpc.framework.face import interfaces # pylint: disable=unused-import
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import callable_util
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
class _ValueInStreamOutConsumer(stream.Consumer):
"""A stream.Consumer that maps inputs one-to-many onto outputs."""
def __init__(self, behavior, context, downstream):
"""Constructor.
Args:
behavior: A callable that takes a single value and an
interfaces.RpcContext and returns a generator of arbitrarily many
values.
context: An interfaces.RpcContext.
downstream: A stream.Consumer to which to pass the values generated by the
given behavior.
"""
self._behavior = behavior
self._context = context
self._downstream = downstream
def consume(self, value):
_control.pipe_iterator_to_consumer(
self._behavior(value, self._context), self._downstream,
self._context.is_active, False)
def terminate(self):
self._downstream.terminate()
def consume_and_terminate(self, value):
_control.pipe_iterator_to_consumer(
self._behavior(value, self._context), self._downstream,
self._context.is_active, True)
def _pool_wrap(behavior, operation_context):
"""Wraps an operation-related behavior so that it may be called in a pool.
Args:
behavior: A callable related to carrying out an operation.
operation_context: A base_interfaces.OperationContext for the operation.
Returns:
A callable that when called carries out the behavior of the given callable
and handles whatever exceptions it raises appropriately.
"""
def translation(*args):
try:
behavior(*args)
except (
abandonment.Abandoned,
exceptions.ExpirationError,
exceptions.CancellationError,
exceptions.ServicedError,
exceptions.NetworkError) as e:
if operation_context.is_active():
operation_context.fail(e)
except Exception as e:
operation_context.fail(e)
return callable_util.with_exceptions_logged(
translation, _control.INTERNAL_ERROR_LOG_MESSAGE)
def adapt_inline_value_in_value_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return stream_util.TransformingConsumer(
lambda request: method(request, rpc_context), response_consumer)
return adaptation
def adapt_inline_value_in_stream_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return _ValueInStreamOutConsumer(method, rpc_context, response_consumer)
return adaptation
def adapt_inline_stream_in_value_out(method, pool):
def adaptation(response_consumer, operation_context):
rendezvous = _control.Rendezvous()
operation_context.add_termination_callback(rendezvous.set_outcome)
def in_pool_thread():
response_consumer.consume_and_terminate(
method(rendezvous, _control.RpcContext(operation_context)))
pool.submit(_pool_wrap(in_pool_thread, operation_context))
return rendezvous
return adaptation
def adapt_inline_stream_in_stream_out(method, pool):
"""Adapts an interfaces.InlineStreamInStreamOutMethod for use with Consumers.
RPCs may be serviced by calling the return value of this function, passing
request values to the stream.Consumer returned from that call, and receiving
response values from the stream.Consumer passed to that call.
Args:
method: An interfaces.InlineStreamInStreamOutMethod.
pool: A thread pool.
Returns:
A callable that takes a stream.Consumer and a
base_interfaces.OperationContext and returns a stream.Consumer.
"""
def adaptation(response_consumer, operation_context):
rendezvous = _control.Rendezvous()
operation_context.add_termination_callback(rendezvous.set_outcome)
def in_pool_thread():
_control.pipe_iterator_to_consumer(
method(rendezvous, _control.RpcContext(operation_context)),
response_consumer, operation_context.is_active, True)
pool.submit(_pool_wrap(in_pool_thread, operation_context))
return rendezvous
return adaptation
def adapt_event_value_in_value_out(method):
def adaptation(response_consumer, operation_context):
def on_payload(payload):
method(
payload, response_consumer.consume_and_terminate,
_control.RpcContext(operation_context))
return _control.UnaryConsumer(on_payload)
return adaptation
def adapt_event_value_in_stream_out(method):
def adaptation(response_consumer, operation_context):
def on_payload(payload):
method(
payload, response_consumer, _control.RpcContext(operation_context))
return _control.UnaryConsumer(on_payload)
return adaptation
def adapt_event_stream_in_value_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return method(response_consumer.consume_and_terminate, rpc_context)
return adaptation
def adapt_event_stream_in_stream_out(method):
def adaptation(response_consumer, operation_context):
return method(response_consumer, _control.RpcContext(operation_context))
return adaptation
| bsd-3-clause |
foodszhang/kbengine | kbe/src/lib/python/Lib/encodings/iso8859_7.py | 272 | 12844 | """ Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-7',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
'\xa3' # 0xA3 -> POUND SIGN
'\u20ac' # 0xA4 -> EURO SIGN
'\u20af' # 0xA5 -> DRACHMA SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe'
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
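# A small round-trip sketch (added for illustration, not part of the generated
# codec module): map a few Greek letters through the tables above and back.
# The sample string is arbitrary and chosen only for illustration.
if __name__ == "__main__":
    codec = Codec()
    encoded, _ = codec.encode('\u0391\u03b2\u03b3')   # ALPHA, beta, gamma
    decoded, _ = codec.decode(encoded)
    assert encoded == b'\xc1\xe2\xe3' and decoded == '\u0391\u03b2\u03b3'
    print(encoded, decoded)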
| lgpl-3.0 |
claell/plugin.video.prime_instant | default.py | 1 | 66195 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import socket
import mechanize
import cookielib
import sys
import re
import os
import json
import time
import string
import random
import shutil
import subprocess
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
import xbmc
addon = xbmcaddon.Addon()
addonID = addon.getAddonInfo('id')
addonFolder = downloadScript = xbmc.translatePath('special://home/addons/'+addonID).decode('utf-8')
addonUserDataFolder = xbmc.translatePath("special://profile/addon_data/"+addonID).decode('utf-8')
icon = os.path.join(addonFolder, "icon.png").encode('utf-8')
def translation(id):
return addon.getLocalizedString(id).encode('utf-8')
if not os.path.exists(os.path.join(addonUserDataFolder, "settings.xml")):
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30081)+',10000,'+icon+')')
addon.openSettings()
socket.setdefaulttimeout(30)
pluginhandle = int(sys.argv[1])
cj = cookielib.MozillaCookieJar()
downloadScript = os.path.join(addonFolder, "download.py").encode('utf-8')
downloadScriptTV = os.path.join(addonFolder, "downloadTV.py").encode('utf-8')
cacheFolder = os.path.join(addonUserDataFolder, "cache")
cacheFolderCoversTMDB = os.path.join(cacheFolder, "covers")
cacheFolderFanartTMDB = os.path.join(cacheFolder, "fanart")
addonFolderResources = os.path.join(addonFolder, "resources")
defaultFanart = os.path.join(addonFolderResources, "fanart.png")
libraryFolder = os.path.join(addonUserDataFolder, "library")
libraryFolderMovies = os.path.join(libraryFolder, "Movies")
libraryFolderTV = os.path.join(libraryFolder, "TV")
cookieFile = os.path.join(addonUserDataFolder, "cookies")
debugFile = os.path.join(addonUserDataFolder, "debug")
preferAmazonTrailer = addon.getSetting("preferAmazonTrailer") == "true"
showNotification = addon.getSetting("showNotification") == "true"
showOriginals = addon.getSetting("showOriginals") == "true"
showLibrary = addon.getSetting("showLibrary") == "true"
showKids = addon.getSetting("showKids") == "true"
forceView = addon.getSetting("forceView") == "true"
updateDB = addon.getSetting("updateDB") == "true"
useTMDb = addon.getSetting("useTMDb") == "true"
watchlistOrder = addon.getSetting("watchlistOrder")
watchlistOrder = ["DATE_ADDED_DESC", "TITLE_ASC"][int(watchlistOrder)]
maxBitrate = addon.getSetting("maxBitrate")
maxBitrate = [300, 600, 900, 1350, 2000, 2500, 4000, 6000, 10000, -1][int(maxBitrate)]
maxDevices = 3
maxDevicesWaitTime = 120
siteVersion = addon.getSetting("siteVersion")
apiMain = ["atv-ps", "atv-ps-eu", "atv-ps-eu"][int(siteVersion)]
rtmpMain = ["azusfms", "azeufms", "azeufms"][int(siteVersion)]
siteVersion = ["com", "co.uk", "de"][int(siteVersion)]
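# Illustrative note (added comment): siteVersion setting 0 selects amazon.com with
# the "atv-ps"/"azusfms" endpoints, setting 1 selects amazon.co.uk and setting 2
# selects amazon.de, both of the latter using the EU "atv-ps-eu"/"azeufms" endpoints.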
viewIdMovies = addon.getSetting("viewIdMovies")
viewIdShows = addon.getSetting("viewIdShows")
viewIdSeasons = addon.getSetting("viewIdSeasons")
viewIdEpisodes = addon.getSetting("viewIdEpisodes")
viewIdDetails = addon.getSetting("viewIdDetails")
urlMain = "http://www.amazon."+siteVersion
urlMainS = "https://www.amazon."+siteVersion
addon.setSetting('email', '')
addon.setSetting('password', '')
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
userAgent = "Mozilla/5.0 (X11; U; Linux i686; de-DE) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Large Screen Safari/533.4 GoogleTV/ 162671"
opener.addheaders = [('User-agent', userAgent)]
deviceTypeID = "A324MFXUEZFF7B"
if not os.path.isdir(addonUserDataFolder):
os.mkdir(addonUserDataFolder)
if not os.path.isdir(cacheFolder):
os.mkdir(cacheFolder)
if not os.path.isdir(cacheFolderCoversTMDB):
os.mkdir(cacheFolderCoversTMDB)
if not os.path.isdir(cacheFolderFanartTMDB):
os.mkdir(cacheFolderFanartTMDB)
if not os.path.isdir(libraryFolder):
os.mkdir(libraryFolder)
if not os.path.isdir(libraryFolderMovies):
os.mkdir(libraryFolderMovies)
if not os.path.isdir(libraryFolderTV):
os.mkdir(libraryFolderTV)
if os.path.exists(cookieFile):
cj.load(cookieFile)
def index():
loginResult = login()
if loginResult=="prime":
addDir(translation(30002), "", 'browseMovies', "")
addDir(translation(30003), "", 'browseTV', "")
xbmcplugin.endOfDirectory(pluginhandle)
elif loginResult=="noprime":
listOriginals()
elif loginResult=="none":
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30082)+',10000,'+icon+')')
def browseMovies():
addDir(translation(30004), urlMain+"/gp/video/watchlist/movie/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if showLibrary:
addDir(translation(30005), urlMain+"/gp/video/library/movie?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if siteVersion=="de":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010075031%2Cn%3A3356018031&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3010075031%2Cn%3A!3010076031%2Cn%3A3015915031%2Cp_n_theme_browse-bin%3A3015972031%2Cp_85%3A3282148031&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031&sort=date-desc-rank", 'listMovies', "")
addDir(translation(30009), urlMain+"/s/?n=4963842031", 'listMovies', "")
elif siteVersion=="com":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613704011&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30012), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=feature_five_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30013), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=feature_six_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_theme_browse-bin%3A2650365011&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613704011&sort=date-desc-rank", 'listMovies', "")
elif siteVersion=="co.uk":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356010031&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_theme_browse-bin%3A3046745031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356010031&sort=date-desc-rank", 'listMovies', "")
addDir(translation(30015), "movies", 'search', "")
xbmcplugin.endOfDirectory(pluginhandle)
def browseTV():
addDir(translation(30004), urlMain+"/gp/video/watchlist/tv/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if showLibrary:
addDir(translation(30005), urlMain+"/gp/video/library/tv/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if siteVersion=="de":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010075031%2Cn%3A3356019031&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356019031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3010075031%2Cn%3A!3010076031%2Cn%3A3015916031%2Cp_n_theme_browse-bin%3A3015972031%2Cp_85%3A3282148031&ie=UTF8", 'listShows', "")
addDir(translation(30010), urlMain+"/gp/search/ajax/?_encoding=UTF8&keywords=[OV]&rh=n%3A3010075031%2Cn%3A3015916031%2Ck%3A[OV]%2Cp_85%3A3282148031&sort=date-desc-rank", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3279204031%2Cn%3A3010075031%2Cn%3A3015916031&sort=date-desc-rank", 'listShows', "")
elif siteVersion=="com":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613705011&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
addDir(translation(30012), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=feature_five_browse-bin&ie=UTF8", 'listGenres', "", "tv")
addDir(translation(30013), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=feature_six_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613705011%2Cp_n_theme_browse-bin%3A2650365011&sort=csrank&ie=UTF8", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613705011&sort=date-desc-rank", 'listShows', "")
elif siteVersion=="co.uk":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356011031&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356011031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356011031%2Cp_n_theme_browse-bin%3A3046745031&sort=popularity-rank&ie=UTF8", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356011031&sort=date-desc-rank", 'listShows', "")
if showOriginals:
addDir("Amazon Originals: Pilot Season 2015", "", 'listOriginals', "")
addDir(translation(30015), "tv", 'search', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listDecadesMovie():
if siteVersion=="de":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289642031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289643031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289644031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289645031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289646031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289647031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289648031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
elif siteVersion=="com":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651255011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651256011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651257011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651258011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651259011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651260011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651261011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
elif siteVersion=="co.uk":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289666031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289667031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289668031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289669031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289670031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289671031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289672031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listOriginals():
if siteVersion=="de":
content = opener.open(urlMain+"/b/?ie=UTF8&node=5457207031").read()
elif siteVersion=="com":
content = opener.open(urlMain+"/b/?ie=UTF8&node=9940930011").read()
elif siteVersion=="co.uk":
content = opener.open(urlMain+"/b/?ie=UTF8&node=5687760031").read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
content = content[content.find('<map name="pilots'):]
content = content[:content.find('</map>')]
spl = content.split('shape="rect"')
thumbs = {}
thumbs['maninthehighcastle'] = 'http://ecx.images-amazon.com/images/I/5114a5G6oQL.jpg'
thumbs['cocked'] = 'http://ecx.images-amazon.com/images/I/51ky16-xESL.jpg'
thumbs['maddogs'] = 'http://ecx.images-amazon.com/images/I/61mWRYn7U2L.jpg'
thumbs['thenewyorkerpresents'] = 'http://ecx.images-amazon.com/images/I/41Yb8SUjMzL.jpg'
thumbs['pointofhonor'] = 'http://ecx.images-amazon.com/images/I/51OBmT5ARUL.jpg'
thumbs['downdog'] = 'http://ecx.images-amazon.com/images/I/51N2zkhOxGL.jpg'
thumbs['salemrogers'] = 'http://ecx.images-amazon.com/images/I/510nXRWkoaL.jpg'
thumbs['table58'] = 'http://ecx.images-amazon.com/images/I/51AIPgzNiWL.jpg'
thumbs['buddytechdetective'] = 'http://ecx.images-amazon.com/images/I/513pbjgDLYL.jpg'
thumbs['sarasolvesit'] = 'http://ecx.images-amazon.com/images/I/51Y5G5RbLUL.jpg'
thumbs['stinkyanddirty'] = 'http://ecx.images-amazon.com/images/I/51WzytCUmdL.jpg'
thumbs['niko'] = 'http://ecx.images-amazon.com/images/I/51XjJrg9JLL.jpg'
thumbs['justaddmagic'] = 'http://ecx.images-amazon.com/images/I/5159YFd0hQL.jpg'
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile("/gp/product/(.+?)/", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
titleT = title.lower().replace(' ', '').strip()
titleT = titleT.replace("pointofhonour", "pointofhonor")
titleT = titleT.replace("buddytechdective", "buddytechdetective")
titleT = titleT.replace("buddytechdetectives", "buddytechdetective")
titleT = titleT.replace("thestinkyanddirtyshow", "stinkyanddirty")
titleT = titleT.replace("nikkoandtheswordoflight", "niko")
titleT = titleT.replace("nikoandtheswordoflight", "niko")
thumb = ""
if titleT in thumbs:
thumb = thumbs[titleT]
addShowDir(title, videoID, "listSeasons", thumb, "tv")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode(500)')
def listWatchList(url):
content = opener.open(url).read()
debug(content)
match = re.compile('csrf":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<div class="innerItem"')
dlParams = []
videoType = ""
showEntries = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</td>')]
if "/library/" in url or ("/watchlist/" in url and ("class='prime-meta'" in entry or 'class="prime-logo"' in entry)):
match = re.compile('data-prod-type="(.+?)"', re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile('id="(.+?)"', re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':videoType, 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
if videoType=="tv":
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType == "movie":
addLinkR(title, videoID, "playVideo", thumbUrl, videoType)
else:
addShowDirR(title, videoID, "listSeasons", thumbUrl, videoType)
if videoType == "movie":
xbmcplugin.setContent(pluginhandle, "movies")
else:
xbmcplugin.setContent(pluginhandle, "tvshows")
if useTMDb and videoType == "movie":
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
elif useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
if videoType == "movie":
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
else:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listMovies(url):
xbmcplugin.setContent(pluginhandle, "movies")
content = opener.open(url).read()
debug(content)
content = content.replace("\\","")
if 'id="catCorResults"' in content:
content = content[:content.find('id="catCorResults"')]
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('id="result_')
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('asin="(.+?)"', re.DOTALL).findall(entry)
if match and ">Prime Instant Video<" in entry:
videoID = match[0]
match1 = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
match2 = re.compile('class="ilt2">(.+?)<', re.DOTALL).findall(entry)
title = ""
if match1:
title = match1[0]
elif match2:
title = match2[0]
title = cleanTitle(title)
match1 = re.compile('class="a-size-small a-color-secondary">(.+?)<', re.DOTALL).findall(entry)
match2 = re.compile('class="med reg subt">(.+?)<', re.DOTALL).findall(entry)
year = ""
if match1:
year = match1[0].strip()
if match2:
year = match2[0].strip()
dlParams.append({'type':'movie', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':year})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
match = re.compile('data-action="s-watchlist-add".+?class="a-button a-button-small(.+?)"', re.DOTALL).findall(entry)
if match and match[0]==" s-hidden":
addLinkR(title, videoID, "playVideo", thumbUrl, "movie", "", "", year)
else:
addLink(title, videoID, "playVideo", thumbUrl, "movie", "", "", year)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
match = re.compile('class="pagnNext".*?href="(.+?)"', re.DOTALL).findall(content)
if match:
addDir(translation(30001), urlMain+match[0].replace("&","&"), "listMovies", "DefaultTVShows.png")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
def listShows(url):
xbmcplugin.setContent(pluginhandle, "tvshows")
content = opener.open(url).read()
debug(content)
content = content.replace("\\","")
if 'id="catCorResults"' in content:
content = content[:content.find('id="catCorResults"')]
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('id="result_')
showEntries = []
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('asin="(.+?)"', re.DOTALL).findall(entry)
if match and ">Prime Instant Video<" in entry:
videoID = match[0]
match1 = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
match2 = re.compile('class="ilt2">(.+?)<', re.DOTALL).findall(entry)
title = ""
if match1:
title = match1[0]
elif match2:
title = match2[0]
title = cleanTitle(title)
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
match1 = re.compile('class="a-size-small a-color-secondary">(.+?)<', re.DOTALL).findall(entry)
match2 = re.compile('class="med reg subt">(.+?)<', re.DOTALL).findall(entry)
year = ""
if match1:
year = match1[0].strip()
if match2:
year = match2[0].strip()
dlParams.append({'type':'tv', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':year})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
match = re.compile('data-action="s-watchlist-add".+?class="a-button a-button-small(.*?)"', re.DOTALL).findall(entry)
if match and match[0]==" s-hidden":
addShowDirR(title, videoID, "listSeasons", thumbUrl, "tv")
else:
addShowDir(title, videoID, "listSeasons", thumbUrl, "tv")
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
match = re.compile('class="pagnNext".*?href="(.+?)"', re.DOTALL).findall(content)
if match:
addDir(translation(30001), urlMain+match[0].replace("&","&"), "listShows", "DefaultTVShows.png")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listSimilarMovies(videoID):
xbmcplugin.setContent(pluginhandle, "movies")
content = opener.open(urlMain+"/gp/product/"+videoID).read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<li class="packshot')
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
if 'packshot-sash-prime' in entry:
match = re.compile("data-type='downloadable_(.+?)'", re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile("asin='(.+?)'", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':'movie', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType == "movie":
addLinkR(title, videoID, "playVideo", thumbUrl, videoType)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
def listSimilarShows(videoID):
xbmcplugin.setContent(pluginhandle, "tvshows")
content = opener.open(urlMain+"/gp/product/"+videoID).read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<li class="packshot')
showEntries = []
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
if 'packshot-sash-prime' in entry:
match = re.compile("data-type='downloadable_(.+?)'", re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile("asin='(.+?)'", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':'tv', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType=="tv_season":
videoType="tv"
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
addShowDirR(title, videoID, "listSeasons", thumbUrl, videoType)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listSeasons(seriesName, seriesID, thumb):
xbmcplugin.setContent(pluginhandle, "seasons")
content = opener.open(urlMain+"/gp/product/"+seriesID).read()
debug(content)
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
contentMain = content
content = content[content.find('<select name="seasonAsinAndRef"'):]
content = content[:content.find('</select>')]
match = re.compile('<option value="(.+?):.+?data-a-html-content="(.+?)"', re.DOTALL).findall(content)
if match:
for seasonID, title in match:
if "dv-dropdown-prime" in title:
if "\n" in title:
title = title[:title.find("\n")]
addSeasonDir(title, seasonID, 'listEpisodes', thumb, seriesName, seriesID)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdSeasons+')')
else:
listEpisodes(seriesID, seriesID, thumb, contentMain)
def listEpisodes(seriesID, seasonID, thumb, content="", seriesName=""):
xbmcplugin.setContent(pluginhandle, "episodes")
if not content:
content = opener.open(urlMain+"/gp/product/"+seasonID).read()
debug(content)
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
matchSeason = re.compile('"seasonNumber":"(.+?)"', re.DOTALL).findall(content)
spl = content.split('href="'+urlMain+'/gp/product')
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
match = re.compile('class="episode-title">(.+?)<', re.DOTALL).findall(entry)
if match and ('class="prime-logo-small"' in entry or 'class="episode-status cell-free"' in entry):
title = match[0]
title = cleanTitle(title)
episodeNr = title[:title.find('.')]
title = title[title.find('.')+1:].strip()
match = re.compile('/(.+?)/', re.DOTALL).findall(entry)
episodeID = match[0]
match = re.compile('<p>.+?</span>(.+?)</p>', re.DOTALL).findall(entry)
desc = ""
if match:
desc = cleanTitle(match[0])
length = ""
match1 = re.compile('class="dv-badge runtime">(.+?)h(.+?)min<', re.DOTALL).findall(entry)
match2 = re.compile('class="dv-badge runtime">(.+?)Std.(.+?)Min.<', re.DOTALL).findall(entry)
match3 = re.compile('class="dv-badge runtime">(.+?)min<', re.DOTALL).findall(entry)
match4 = re.compile('class="dv-badge runtime">(.+?)Min.<', re.DOTALL).findall(entry)
if match1:
length = str(int(match1[0][0].strip())*60+int(match1[0][1].strip()))
elif match2:
length = str(int(match2[0][0].strip())*60+int(match2[0][1].strip()))
elif match3:
length = match3[0].strip()
elif match4:
length = match4[0].strip()
match = re.compile('class="dv-badge release-date">(.+?)<', re.DOTALL).findall(entry)
aired = ""
if match:
aired = match[0]+"-01-01"
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
if match:
thumb = match[0].replace("._SX133_QL80_.jpg","._SX400_.jpg")
match = re.compile('class="progress-bar">.+?width: (.+?)%', re.DOTALL).findall(entry)
playcount = 0
if match:
percentage = match[0]
if int(percentage)>95:
playcount = 1
addEpisodeLink(title, episodeID, 'playVideo', thumb, desc, length, matchSeason[0], episodeNr, seriesID, playcount, aired, seriesName)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdEpisodes+')')
def listGenres(url, videoType):
content = opener.open(url).read()
debug(content)
content = content[content.find('<ul class="column vPage1">'):]
content = content[:content.find('</div>')]
match = re.compile('href="(.+?)">.+?>(.+?)</span>.+?>(.+?)<', re.DOTALL).findall(content)
for url, title, nr in match:
if videoType=="movie":
addDir(cleanTitle(title)+nr.replace(" "," "), urlMain+url.replace("/s/","/mn/search/ajax/").replace("&","&"), 'listMovies', "")
else:
addDir(cleanTitle(title), urlMain+url.replace("/s/","/mn/search/ajax/").replace("&","&"), 'listShows', "")
xbmcplugin.endOfDirectory(pluginhandle)
def printLogInline(ptext):
    # lightweight debug helper: print unconditionally (output ends up in the Kodi log)
    print(ptext)
def playVideo(videoID, selectQuality=False, playTrailer=False):
maxDevicesTimestamp = addon.getSetting("maxDevicesTimestamp")
try:
maxDevicesTimestamp = json.loads(maxDevicesTimestamp)
except:
maxDevicesTimestamp = []
maxDevicesTimestamp.append(0)
streamTitles = []
streamURLs = []
cMenu = False
if selectQuality:
cMenu = True
if maxBitrate==-1:
selectQuality = True
content=opener.open(urlMain+"/dp/"+videoID).read()
hasTrailer = False
if '"hasTrailer":true' in content:
hasTrailer = True
matchCID=re.compile('"customerID":"(.+?)"').findall(content)
if matchCID:
matchSWFUrl=re.compile('<script type="text/javascript" src="(.+?)"', re.DOTALL).findall(content)
flashContent=opener.open(matchSWFUrl[0]).read()
matchSWF=re.compile('LEGACY_FLASH_SWF="(.+?)"').findall(flashContent)
matchTitle=re.compile('"og:title" content="(.+?)"', re.DOTALL).findall(content)
matchThumb=re.compile('"og:image" content="(.+?)"', re.DOTALL).findall(content)
matchToken=re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
matchMID=re.compile('"marketplaceID":"(.+?)"').findall(content)
if not playTrailer or (playTrailer and hasTrailer and preferAmazonTrailer and siteVersion!="com"):
content=opener.open(urlMainS+'/gp/video/streaming/player-token.json?callback=jQuery1640'+''.join(random.choice(string.digits) for x in range(18))+'_'+str(int(time.time()*1000))+'&csrftoken='+urllib.quote_plus(matchToken[0])+'&_='+str(int(time.time()*1000))).read()
matchToken=re.compile('"token":"(.+?)"', re.DOTALL).findall(content)
content = ""
tooManyConnections = True
if playTrailer and hasTrailer and preferAmazonTrailer and siteVersion!="com":
content = opener.open('https://'+apiMain+'.amazon.com/cdp/catalog/GetStreamingTrailerUrls?version=1&format=json&firmware=WIN%2011,7,700,224%20PlugIn&marketplaceID='+urllib.quote_plus(matchMID[0])+'&token='+urllib.quote_plus(matchToken[0])+'&deviceTypeID='+urllib.quote_plus(deviceTypeID)+'&asin='+videoID+'&customerID='+urllib.quote_plus(matchCID[0])+'&deviceID='+urllib.quote_plus(matchCID[0])+str(int(time.time()*1000))+videoID).read()
elif not playTrailer:
if len(maxDevicesTimestamp) < maxDevices:
maxDevicesTimestamp += [0]
elif len(maxDevicesTimestamp) > maxDevices:
maxDevicesTimestamp.pop()
for i, val in enumerate(maxDevicesTimestamp):
if ((int(val) + maxDevicesWaitTime) <= int(time.time())):
content = opener.open('https://'+apiMain+'.amazon.com/cdp/catalog/GetStreamingUrlSets?version=1&format=json&firmware=WIN%2011,7,700,224%20PlugIn&marketplaceID='+urllib.quote_plus(matchMID[0])+'&token='+urllib.quote_plus(matchToken[0])+'&deviceTypeID='+urllib.quote_plus(deviceTypeID)+'&asin='+videoID+'&customerID='+urllib.quote_plus(matchCID[0])+'&deviceID='+urllib.quote_plus(matchCID[0])+str(int(time.time()*1000))+videoID).read()
if not "SUCCESS" in str(content):
maxDevicesTimestamp[i] = int(val) + 10
elif '$de' in str(content):
ediag = xbmcgui.Dialog()
if (ediag.yesno('Video nicht in deutsch verfügbar','Das Video ist nicht in deutscher Sprache verfügbar. Stattdessen in originaler Sprache wiedergeben?')):
content = opener.open('https://'+apiMain+'.amazon.com/cdp/catalog/GetStreamingUrlSets?version=1&format=json&xws-fa-ov=true&audioTrackId=eng_dialog_0&firmware=WIN%2011,7,700,224%20PlugIn&marketplaceID='+urllib.quote_plus(matchMID[0])+'&token='+urllib.quote_plus(matchToken[0])+'&deviceTypeID='+urllib.quote_plus(deviceTypeID)+'&asin='+videoID+'&customerID='+urllib.quote_plus(matchCID[0])+'&deviceID='+urllib.quote_plus(matchCID[0])+str(int(time.time()*1000))+videoID).read()
else:
tooManyConnections = False
break
addon.setSetting("maxDevicesTimestamp", str(json.dumps(maxDevicesTimestamp)))
elif playTrailer:
try:
strT = ""
if siteVersion=="de":
strT = "+german"
contentT = opener.open("http://gdata.youtube.com/feeds/api/videos?vq="+cleanTitle(matchTitle[0]).replace(" ", "+")+"+trailer"+strT+"&racy=include&orderby=relevance").read()
match = re.compile('<id>http://gdata.youtube.com/feeds/api/videos/(.+?)</id>', re.DOTALL).findall(contentT.split('<entry>')[1])
xbmc.Player().play("plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=" + match[0])
except:
pass
debug(content)
if content:
if not "SUCCESS" in str(content):
content = json.loads(content)
ediag = xbmcgui.Dialog()
acode = str(content['message']['body']['code'])
amessage = str(content['message']['body']['message'])
ediag.ok('Amazon meldet: '+ acode, amessage)
else:
content = json.loads(content)
thumbUrl = matchThumb[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
contentT = ""
try:
contentT = content['message']['body']['urlSets']['streamingURLInfoSet'][0]['streamingURLInfo']
except:
try:
contentT = content['message']['body']['streamingURLInfoSet']['streamingURLInfo']
except:
pass
if contentT:
url = ''
for item in contentT:
if not '$' in item['url']:
if selectQuality:
streamTitles.append(str(item['bitrate'])+"kb")
streamURLs.append(item['url'])
url = item['url']
elif item['bitrate']<=maxBitrate:
url = item['url']
if not rtmpMain in url:
try:
if selectQuality:
streamTitles = []
streamURLs = []
for item in content['message']['body']['urlSets']['streamingURLInfoSet'][1]['streamingURLInfo']:
if selectQuality:
streamTitles.append(str(item['bitrate'])+"kb")
streamURLs.append(item['url'])
elif item['bitrate']<=maxBitrate:
url = item['url']
except:
pass
if url and not '$' in url:
if selectQuality:
dialog = xbmcgui.Dialog()
nr=dialog.select(translation(30059), streamTitles)
if nr>=0:
url=streamURLs[nr]
if url.startswith("rtmpe"):
url = url.replace('rtmpe','rtmp')+' swfVfy=1 swfUrl='+matchSWF[0]+' pageUrl='+urlMain+'/dp/'+videoID+' app='+rtmpMain+'-vod playpath='+url[url.find('mp4:'):]+' tcUrl=rtmpe://'+rtmpMain+'-vodfs.fplive.net:1935/'+rtmpMain+'-vod/'
if playTrailer or (selectQuality and cMenu):
listitem = xbmcgui.ListItem(cleanTitle(matchTitle[0]), path=url, thumbnailImage=thumbUrl)
xbmc.Player().play(url, listitem)
else:
listitem = xbmcgui.ListItem(cleanTitle(matchTitle[0]), path=url, thumbnailImage=thumbUrl)
xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
elif url.startswith("http"):
dialog = xbmcgui.Dialog()
if dialog.yesno('Info', translation(30085)):
content=opener.open(urlMainS+"/gp/video/settings/ajax/player-preferences-endpoint.html", "rurl="+urllib.quote_plus(urlMainS+"/gp/video/settings")+"&csrfToken="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&aiv-pp-toggle=flash").read()
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30086)+',10000,'+icon+')')
playVideo(videoID, selectQuality)
else:
url = ''
diag = xbmcgui.Dialog()
diag.ok('Ungültige URL', 'Das Video kann nicht wiedergegeben werden.')
else:
diag = xbmcgui.Dialog()
diag.ok('Kein Stream', 'Es wurde kein passender Stream zum Öffnen gefunden')
else:
if tooManyConnections:
diag = xbmcgui.Dialog()
diag.ok('Bitte einen Moment warten', 'Es wurden zu viele Verbindungen aufgebaut. Bitte warte einen Moment.')
else:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30082)+',10000,'+icon+')')
def showInfo(videoID):
xbmcplugin.setContent(pluginhandle, "movies")
content=opener.open(urlMain+"/dp/"+videoID).read()
    match=re.compile(r'<script type = "application/ld\+json"> (.+?) </script>', re.DOTALL).findall(content)
jsonstr=cleanInput(match[0])
parsed=json.loads(jsonstr)
title = parsed["name"]
match=re.compile('class="release-year".*?>(.+?)<', re.DOTALL).findall(content)
year = match[0]
title = title+" ("+year+")"
title = cleanTitle(title)
thumb = parsed["thumbnailUrl"].replace(".jpg", "")
thumb = thumb[:thumb.rfind(".")]+".jpg"
director = parsed["director"][0]["name"].replace(",",", ")
actors = parsed["actor"][0]["name"].replace(",",", ")
match=re.compile('property="og:duration" content="(.+?)"', re.DOTALL).findall(content)
length = str(int(match[0])/60)+" min."
rating = parsed["aggregateRating"]["ratingValue"]
ratingCount = parsed["aggregateRating"]["reviewCount"]
description = parsed["description"]
genre = parsed["genre"]
addLink(title, videoID, "playVideo", thumb, videoType="movie", desc=description, duration=length, year=year, mpaa="", director=director, genre=genre, rating=rating)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdDetails+')')
try:
wnd = xbmcgui.Window(xbmcgui.getCurrentWindowId())
wnd.getControl(wnd.getFocusId()).selectItem(1)
except:
pass
def deleteCookies():
if os.path.exists(cookieFile):
os.remove(cookieFile)
def deleteCache():
if os.path.exists(cacheFolder):
try:
shutil.rmtree(cacheFolder)
except:
shutil.rmtree(cacheFolder)
def search(type):
keyboard = xbmc.Keyboard('', translation(30015))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
search_string = keyboard.getText().replace(" ", "+")
if siteVersion=="de":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356018031&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356019031&field-keywords="+search_string)
elif siteVersion=="com":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D7613704011&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D7613705011&field-keywords="+search_string)
elif siteVersion=="co.uk":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356010031&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356011031&field-keywords="+search_string)
def addToQueue(videoID, videoType):
if videoType=="tv":
videoType = "tv_episode"
content = opener.open(urlMain+"/gp/video/watchlist/ajax/addRemove.html/ref=sr_1_1_watchlist_add?token="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&dataType=json&prodType="+videoType+"&ASIN="+videoID+"&pageType=Search&subPageType=SASLeafSingleSearch&store=instant-video").read()
if showNotification:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30088)+',3000,'+icon+')')
def removeFromQueue(videoID, videoType):
if videoType=="tv":
videoType = "tv_episode"
content = opener.open(urlMain+"/gp/video/watchlist/ajax/addRemove.html/ref=sr_1_1_watchlist_remove?token="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&dataType=json&prodType="+videoType+"&ASIN="+videoID+"&pageType=Search&subPageType=SASLeafSingleSearch&store=instant-video").read()
xbmc.executebuiltin("Container.Refresh")
if showNotification:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30089)+',3000,'+icon+')')
def login():
content = opener.open(urlMain).read()
if '"isPrime":1' in content:
return "prime"
elif 'id="nav-item-signout"' in content:
return "noprime"
else:
content = ""
keyboard = xbmc.Keyboard('', translation(30090))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
email = keyboard.getText()
keyboard = xbmc.Keyboard('', translation(30091))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
password = keyboard.getText()
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_robots(False)
br.addheaders = [('User-agent', userAgent)]
content = br.open(urlMainS+"/gp/sign-in.html")
br.select_form(name="signIn")
br["email"] = email
br["password"] = password
content = br.submit().read()
cj.save(cookieFile)
content = opener.open(urlMain).read()
if '"isPrime":1' in content:
return "prime"
elif 'id="nav-item-signout"' in content:
return "noprime"
else:
return "none"
def cleanTitle(title):
if "[HD]" in title:
title = title[:title.find("[HD]")]
title = title.replace("&","&").replace("'","'").replace("é","é").replace("ä","ä").replace("ö","ö").replace("ü","ü").replace("Ä","Ä").replace("Ö","Ö").replace("Ü","Ü").replace("ß","ß").replace("…","…")
title = title.replace("é","é").replace("ä","ä").replace("ö","ö").replace("ü","ü").replace("Ä","Ä").replace("Ö","Ö").replace("Ü","Ü").replace("ß","ß")
return title.replace("\xe4","ä").replace("\xf6","ö").replace("\xfc","ü").replace("\xc4","Ä").replace("\xd6","Ö").replace("\xdc","Ü").replace("\xdf","ß").strip()
def cleanSeasonTitle(title):
if ": The Complete" in title:
title = title[:title.rfind(": The Complete")]
if "Season" in title:
title = title[:title.rfind("Season")]
if "Staffel" in title:
title = title[:title.rfind("Staffel")]
if "Volume" in title:
title = title[:title.rfind("Volume")]
if "Series" in title:
title = title[:title.rfind("Series")]
return title.strip(" -,")
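# Illustrative sketch (example titles are hypothetical): cleanSeasonTitle collapses
# per-season titles into a single show name so duplicate entries can be filtered, e.g.
#   cleanSeasonTitle("Vikings - Season 2")             -> "Vikings"
#   cleanSeasonTitle("The Wire: The Complete Series")  -> "The Wire"
# The final strip(" -,") removes separator characters left at the end.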
def cleanTitleTMDB(title):
if "[" in title:
title = title[:title.find("[")]
if " OmU" in title:
title = title[:title.find(" OmU")]
return title
def addMovieToLibrary(movieID, title):
movieFolderName = (''.join(c for c in unicode(title, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
dir = os.path.join(libraryFolderMovies, movieFolderName)
if not os.path.isdir(dir):
xbmcvfs.mkdir(dir)
fh = xbmcvfs.File(os.path.join(dir, "movie.strm"), 'w')
fh.write('plugin://'+addonID+'/?mode=playVideo&url='+movieID)
fh.close()
if updateDB:
xbmc.executebuiltin('UpdateLibrary(video)')
def addSeasonToLibrary(seriesID, seriesTitle, seasonID):
seriesFolderName = (''.join(c for c in unicode(seriesTitle, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
seriesDir = os.path.join(libraryFolderTV, seriesFolderName)
if not os.path.isdir(seriesDir):
xbmcvfs.mkdir(seriesDir)
content = opener.open(urlMain+"/gp/product/"+seasonID).read()
matchSeason = re.compile('"seasonNumber":"(.+?)"', re.DOTALL).findall(content)
spl = content.split('href="'+urlMain+'/gp/product')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('class="episode-title">(.+?)<', re.DOTALL).findall(entry)
if match:
title = match[0]
title = cleanTitle(title)
episodeNr = title[:title.find('.')]
title = title[title.find('.')+1:].strip()
match = re.compile('/(.+?)/', re.DOTALL).findall(entry)
episodeID = match[0]
if len(episodeNr) > 2:
episodeNr = ''.join(re.findall(r'\d+', episodeNr))
if len(episodeNr) == 1:
episodeNr = "0"+episodeNr
seasonNr = matchSeason[0]
if len(seasonNr) == 1:
seasonNr = "0"+seasonNr
filename = "S"+seasonNr+"E"+episodeNr+" - "+title+".strm"
filename = (''.join(c for c in unicode(filename, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
fh = xbmcvfs.File(os.path.join(seriesDir, filename), 'w')
fh.write('plugin://'+addonID+'/?mode=playVideo&url='+episodeID)
fh.close()
if updateDB:
xbmc.executebuiltin('UpdateLibrary(video)')
def debug(content):
fh=open(debugFile, "w")
fh.write(content)
fh.close()
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
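# Illustrative sketch (values are hypothetical): parameters_string_to_dict parses the
# query string Kodi hands the plugin in sys.argv[2], e.g.
#   parameters_string_to_dict("?mode=playVideo&url=B00EXAMPLE")
#     -> {'mode': 'playVideo', 'url': 'B00EXAMPLE'}
# Pairs that do not split into exactly key=value are ignored; values stay URL-encoded
# until the urllib.unquote_plus() calls below decode them.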
def addDir(name, url, mode, iconimage, videoType=""):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&videoType="+urllib.quote_plus(videoType)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name})
liz.setProperty("fanart_image", defaultFanart)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addShowDir(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(name)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30052), 'RunPlugin(plugin://'+addonID+'/?mode=addToQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addShowDirR(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(name)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30053), 'RunPlugin(plugin://'+addonID+'/?mode=removeFromQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addLink(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
if videoType != "episode":
entries.append((translation(30060), 'Container.Update(plugin://'+addonID+'/?mode=showInfo&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30052), 'RunPlugin(plugin://'+addonID+'/?mode=addToQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
if videoType == "movie":
titleTemp = name.strip()
if year:
titleTemp += ' ('+year+')'
entries.append((translation(30055), 'RunPlugin(plugin://'+addonID+'/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(titleTemp)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addLinkR(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
entries.append((translation(30060), 'Container.Update(plugin://'+addonID+'/?mode=showInfo&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30053), 'RunPlugin(plugin://'+addonID+'/?mode=removeFromQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
if videoType == "movie":
titleTemp = name.strip()
if year:
titleTemp += ' ('+year+')'
entries.append((translation(30055), 'RunPlugin(plugin://'+addonID+'/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(titleTemp)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addSeasonDir(name, url, mode, iconimage, seriesName, seriesID):
filename = (''.join(c for c in unicode(seriesID, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&seriesID="+urllib.quote_plus(seriesID)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(seriesName)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "TVShowTitle": seriesName})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30056), 'RunPlugin(plugin://'+addonID+'/?mode=addSeasonToLibrary&url='+urllib.quote_plus(url)+'&seriesID='+urllib.quote_plus(seriesID)+'&name='+urllib.quote_plus(seriesName.strip())+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addEpisodeLink(name, url, mode, iconimage, desc="", duration="", season="", episodeNr="", seriesID="", playcount="", aired="", seriesName=""):
filename = (''.join(c for c in unicode(seriesID, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "season": season, "episode": episodeNr, "aired": aired, "playcount": playcount, "TVShowTitle": seriesName})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
name = urllib.unquote_plus(params.get('name', ''))
season = urllib.unquote_plus(params.get('season', ''))
seriesID = urllib.unquote_plus(params.get('seriesID', ''))
videoType = urllib.unquote_plus(params.get('videoType', ''))
selectQuality = urllib.unquote_plus(params.get('selectQuality', ''))
if mode == 'listMovies':
listMovies(url)
elif mode == 'listShows':
listShows(url)
elif mode == 'listWatchList':
listWatchList(url)
elif mode == 'listGenres':
listGenres(url, videoType)
elif mode == 'addToQueue':
addToQueue(url, videoType)
elif mode == 'removeFromQueue':
removeFromQueue(url, videoType)
elif mode == 'playVideo':
playVideo(url, selectQuality=="true")
elif mode == 'playVideoSelect':
playVideo(url, True)
elif mode == 'browseMovies':
browseMovies()
elif mode == 'browseTV':
browseTV()
elif mode == 'search':
search(url)
elif mode == 'login':
login()
elif mode == 'listDecadesMovie':
listDecadesMovie()
elif mode == 'listOriginals':
listOriginals()
elif mode == 'listSeasons':
listSeasons(name, url, thumb)
elif mode == 'listEpisodes':
listEpisodes(seriesID, url, thumb, "", name)
elif mode == 'deleteCookies':
deleteCookies()
elif mode == 'deleteCache':
deleteCache()
elif mode == 'playTrailer':
playVideo(url, selectQuality=="true", True)
elif mode == 'listSimilarMovies':
listSimilarMovies(url)
elif mode == 'listSimilarShows':
listSimilarShows(url)
elif mode == 'showInfo':
showInfo(url)
elif mode == 'addMyListToLibrary':
addMyListToLibrary()
elif mode == 'addMovieToLibrary':
addMovieToLibrary(url, name)
elif mode == 'addSeasonToLibrary':
addSeasonToLibrary(seriesID, name, url)
else:
index()
| gpl-2.0 |
santosjorge/cufflinks | cufflinks/quant_figure.py | 1 | 38707 |
"""
QuantFigure allows you to create a persistent object.
Annotations and Technical Studies can be added on demand.
It accepts any dataframe with a timeseries index.
Try it out:
qf=cf.QuantFig(cf.datagen.ohlc())
qf.iplot()
"""
from __future__ import absolute_import
import plotly.graph_objs as go
import json
import copy
import pandas as pd
from functools import reduce  # reduce is used in QuantFig.delete() and is not a builtin in Python 3
from .plotlytools import iplot as pt_iplot
from . import tools
from . import ta
from . import utils
from . import colors
from . import auth
from . import date_tools
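# A slightly fuller usage sketch (keyword values below are illustrative, not defaults):
#
#   import cufflinks as cf
#   qf = cf.QuantFig(cf.datagen.ohlc(), title='Demo', name='OHLC',
#                    up_color='green', down_color='red')
#   qf.iplot()
#
# Studies, trendlines and annotations (see the add_* methods on QuantFig below) can be
# layered on before calling iplot().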
__QUANT_FIGURE_DATA = ['kind','showlegend','datalegend','name','slice','resample','bestfit',
                    'text','title','yTitle','secondary_y_title','bestfit_colors',
                    'colorscale','xTitle','colors','secondary_y']
__QUANT_FIGURE_LAYOUT = ['annotations','showlegend','margin','rangeselector','rangeslider','shapes',
'width','height','dimensions']
__QUANT_FIGURE_THEME = ['theme','up_color','down_color']
__QUANT_FIGURE_PANELS = ['min_panel_size','spacing','top_margin','bottom_margin']
def get_layout_kwargs():
return tools.__LAYOUT_KWARGS
def get_annotation_kwargs():
return tools.__ANN_KWARGS
def get_shapes_kwargs(): return tools.__SHAPES_KWARGS
class QuantFig(object):
def __init__(self,df,kind='candlestick',columns=None,**kwargs):
self.df=df
self.studies={}
self.data={}
self.theme={}
self.panels={}
self.layout={}
self.trendlines=[]
self.kwargs={}
# Set column names
if not columns:
columns={}
for _ in ['open','high','low','close','volume']:
columns[_]=kwargs.pop(_,'')
self._d=ta._ohlc_dict(df,**columns)
# Set initial annotations
annotations={
'values':[],
'params':utils.check_kwargs(kwargs,get_annotation_kwargs(),{},clean_origin=True)
}
ann_values=kwargs.pop('annotations',None)
if ann_values:
if utils.is_list(ann_values):
annotations['values'].extend(ann_values)
else:
annotations['values'].append(ann_values)
# self.data initial values
self.data.update(datalegend=kwargs.pop('datalegend',True),name=kwargs.pop('name','Trace 1'),kind=kind)
self.data.update(slice=kwargs.pop('slice',(None,None)),resample=kwargs.pop('resample',None))
# self.layout initial values
self.layout['shapes']=utils.check_kwargs(kwargs,get_shapes_kwargs(),{},clean_origin=True)
for k,v in list(self.layout['shapes'].items()):
if not isinstance(v,list):
self.layout['shapes'][k]=[v]
self.layout['rangeselector']=kwargs.pop('rangeselector',{'visible':False})
self.layout['rangeslider']=kwargs.pop('rangeslider',False)
self.layout['margin']=kwargs.pop('margin',dict(t=30,b=30,r=30,l=30))
self.layout['annotations']=annotations
self.layout['showlegend']=kwargs.pop('showlegend',True)
self.layout.update(utils.check_kwargs(kwargs,get_layout_kwargs(),{},clean_origin=True))
# self.theme initial values
self.theme['theme']=kwargs.pop('theme',auth.get_config_file()['theme'])
        self.theme['up_color']=kwargs.pop('up_color','#17BECF') # default teal for up candles/bars
self.theme['down_color']=kwargs.pop('down_color','grey')
# self.panels initial values
self.panels['min_panel_size']=kwargs.pop('min_panel_size',.15)
self.panels['spacing']=kwargs.pop('spacing',.08)
self.panels['top_margin']=kwargs.pop('top_margin',0.9)
        self.panels['bottom_margin']=kwargs.pop('bottom_margin',0)
self.update(**kwargs)
def _get_schema(self):
"""
Returns a dictionary with the schema for a QuantFigure
"""
d={}
layout_kwargs=dict((_,'') for _ in get_layout_kwargs())
for _ in ('data','layout','theme','panels'):
d[_]={}
for __ in eval('__QUANT_FIGURE_{0}'.format(_.upper())):
layout_kwargs.pop(__,None)
d[_][__]=None
d['layout'].update(annotations=dict(values=[],
params=utils.make_dict_from_list(get_annotation_kwargs())))
d['layout'].update(shapes=utils.make_dict_from_list(get_shapes_kwargs()))
[layout_kwargs.pop(_,None) for _ in get_annotation_kwargs()+get_shapes_kwargs()]
d['layout'].update(**layout_kwargs)
return d
def _get_sliced(self,slice,df=None,to_strfmt='%Y-%m-%d',from_strfmt='%d%b%y'):
"""
Returns a sliced DataFrame
Parameters
----------
slice : tuple(from,to)
            from : str - ie ('01Jan19')
            to : str - ie ('01Jan20')
States the 'from' and 'to' values which
will get rendered as df.loc[from:to]
df : DataFrame
            If omitted then the QuantFigure.DataFrame is sliced.
"""
        df=self.df.copy() if df is None else df
        if type(slice) not in (list,tuple):
            raise Exception('Slice must be a tuple of two values')
        if len(slice)!=2:
            raise Exception('Slice must be a tuple of two values')
a,b=slice
a=None if a in ('',None) else utils.make_string(a)
b=None if b in ('',None) else utils.make_string(b)
if a:
a=date_tools.stringToString(a,from_strfmt,to_strfmt) if '-' not in a else a
if b:
b=date_tools.stringToString(b,from_strfmt,to_strfmt) if '-' not in b else b
return df.loc[a:b]
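        # Illustrative sketch (dates are hypothetical): with the default formats,
        #   self._get_sliced(('01Jan15','01Mar15'))
        # converts both endpoints to '2015-01-01'/'2015-03-01' and returns
        # self.df.loc['2015-01-01':'2015-03-01']. Passing None or '' for either
        # endpoint leaves that side of the slice open.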
def _get_resampled(self,rule,how={'ohlc':'last','volume':'sum'},df=None,**kwargs):
"""
Returns a resampled DataFrame
Parameters
----------
rule : str
the offset string or object representing target conversion
for all aliases available see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
how : str or dict
states the form in which the resampling will be done.
Examples:
how={'volume':'sum'}
how='count'
df : DataFrame
If omitted then the QuantFigure.DataFrame is resampled.
kwargs
For more information see http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
"""
df=self.df.copy() if df is None else df
if rule==None:
return df
else:
if isinstance(how,dict):
if 'ohlc' in how:
v=how.pop('ohlc')
for _ in ['open','high','low','close']:
how[_]=v
_how=how.copy()
for _ in _how:
if _ not in self._d:
del how[_]
return df.resample(rule=rule,**kwargs).apply(how)
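        # Illustrative sketch (assumes the columns are literally named open/high/low/close/volume):
        #   self._get_resampled('1W')
        # expands the default how={'ohlc':'last','volume':'sum'} and is roughly equivalent to
        #   self.df.resample('1W').apply({'open':'last','high':'last','low':'last',
        #                                 'close':'last','volume':'sum'})
        # Keys in `how` that are not recognised OHLCV columns are dropped first.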
def update(self,**kwargs):
"""
Updates the values for a QuantFigure
The key-values are automatically assigned to the correct
section of the QuantFigure
"""
if 'columns' in kwargs:
self._d=ta._ohlc_dict(self.df,columns=kwargs.pop('columns',None))
schema=self._get_schema()
annotations=kwargs.pop('annotations',None)
if annotations:
self.layout['annotations']['values']=utils.make_list(annotations)
for k,v in list(kwargs.items()):
try:
utils.dict_update(self.__dict__,k,v,schema)
except:
self.kwargs.update({k:v})
def delete(self,*args):
"""
Deletes the values for a QuantFigure
The key-values are automatically deleted from the correct
section of the QuantFigure
"""
if args:
args=args[0] if utils.is_list(args[0]) else args
path=utils.dict_path(self.__dict__)
for _ in args:
if _ in self.__dict__.keys():
raise Exception('"{0}" cannot be deleted'.format(_))
for a in args:
try:
if a in ('shapes'):
self.layout[a].clear()
elif a=='annotations':
self.layout['annotations']={'values':[],'params':{}}
else:
del reduce(dict.get, path[a],self.__dict__)[a]
except:
raise Exception('Key: {0} not found'.format(a))
def figure(self,**kwargs):
"""
Returns a Plotly figure
"""
kwargs['asFigure']=True
return self.iplot(**kwargs)
def _panel_domains(self,n=2,min_panel_size=.15,spacing=0.08,top_margin=1,bottom_margin=0):
"""
Returns the panel domains for each axis
"""
d={}
for _ in range(n+1,1,-1):
lower=round(bottom_margin+(min_panel_size+spacing)*(n+1-_),2)
d['yaxis{0}'.format(_)]=dict(domain=(lower,lower+min_panel_size))
top=d['yaxis2']['domain']
d['yaxis2']['domain']=(top[0],top_margin)
return d
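        # Worked example: _panel_domains(n=3, min_panel_size=.15, spacing=.08,
        # top_margin=.9, bottom_margin=0) returns
        #   {'yaxis4': {'domain': (0.0, 0.15)},    # lowest study panel
        #    'yaxis3': {'domain': (0.23, 0.38)},
        #    'yaxis2': {'domain': (0.46, 0.9)}}    # main panel stretched up to top_margin
        # i.e. every panel gets min_panel_size except yaxis2, which absorbs the remaining space.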
def _get_trendline(self,date0=None,date1=None,on=None,kind='trend',to_strfmt='%Y-%m-%d',from_strfmt='%d%b%y',**kwargs):
"""
Returns a trendline (line), support or resistance
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicate the data series in which the
trendline should be based.
'close'
'high'
'low'
'open'
kind : string
                Defines the kind of trendline
'trend'
'resistance'
'support'
mode : string
Defines how the support/resistance will
be drawn
                'starttoend' : (x0,x1)
'fromstart' : (x0,date0)
'toend' : (date0,x1)
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
from_strfmt : string
                Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
ann_values=copy.deepcopy(get_annotation_kwargs())
ann_values.extend(['x','y'])
ann_kwargs=utils.check_kwargs(kwargs,ann_values,{},clean_origin=True)
def position(d0,d1):
return d0+(d1-d0)/2
date0=kwargs.pop('date',date0)
date0=date_tools.stringToString(date0,from_strfmt,to_strfmt) if '-' not in date0 else date0
if kind=='trend':
date1=date_tools.stringToString(date1,from_strfmt,to_strfmt) if '-' not in date1 else date1
on='close' if not on else on
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.loc[date0].values[0])
y1=kwargs.get('y1',df.loc[date1].values[0])
if kind in ('support','resistance'):
mode=kwargs.pop('mode','starttoend')
if not on:
on='low' if kind=='support' else 'high'
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.loc[date0].values[0])
y1=kwargs.get('y1',y0)
if mode=='starttoend':
date0=df.index[0]
date1=df.index[-1]
elif mode=='toend':
date1=df.index[-1]
elif mode=='fromstart':
date1=date0
date0=df.index[0]
if isinstance(date0,pd.Timestamp):
date0=date_tools.dateToString(date0,to_strfmt)
if isinstance(date1,pd.Timestamp):
date1=date_tools.dateToString(date1,to_strfmt)
d={'x0':date0,'x1':date1,'y0':y0,'y1':y1}
d.update(**kwargs)
shape=tools.get_shape(**d)
if ann_kwargs.get('text',False):
ann_kwargs['x']=ann_kwargs.get('x',date_tools.dateToString(position(date_tools.stringToDate(date0,to_strfmt),date_tools.stringToDate(date1,to_strfmt)),to_strfmt))
ann_kwargs['y']=ann_kwargs.get('y',position(shape['y0'],shape['y1']))
else:
ann_kwargs={}
return {'shape':shape,'annotation':ann_kwargs}
def add_trendline(self,date0,date1,on='close',text=None,**kwargs):
"""
Adds a trendline to the QuantFigure.
Given 2 dates, the trendline is connected on the data points
that correspond to those dates.
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicate the data series in which the
trendline should be based.
'close'
'high'
'low'
'open'
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
kwargs:
from_strfmt : string
Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
d={'kind':'trend','date0':date0,'date1':date1,'on':on,'text':text}
d.update(**kwargs)
self.trendlines.append(d)
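# Illustrative usage sketch (not part of the original source): assuming `qf` is an
# instance of this class built from an OHLC DataFrame indexed by '%Y-%m-%d' dates,
# a labelled trendline between two closing prices could be registered like this:
#   qf.add_trendline('2016-01-04', '2016-03-01', on='close', text='uptrend')
# The entry is only resolved into a Plotly shape when figure()/iplot() runs, so a
# date missing from the index fails at plot time rather than here.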
def add_support(self,date,on='low',mode='starttoend',text=None,**kwargs):
"""
Adds a support line to the QuantFigure
Parameters:
date : string
The support line will be drawn at the 'y' level
value that corresponds to this date.
on : string
Indicate the data series in which the
support line should be based.
'close'
'high'
'low'
'open'
mode : string
Defines how the support/resistance will
be drawn
'starttoend' : (x0,x1)
'fromstart' : (x0,date)
'toend' : (date,x1)
text : string
If passed, then an annotation will be added
to the support line (at mid point)
kwargs:
from_strfmt : string
Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
d={'kind':'support','date':date,'mode':mode,'on':on,'text':text}
d.update(**kwargs)
self.trendlines.append(d)
def add_resistance(self,date,on='high',mode='starttoend',text=None,**kwargs):
"""
Adds a resistance line to the QuantFigure
Parameters:
date : string
The resistance line will be drawn at the 'y' level
value that corresponds to this date.
on : string
Indicate the data series in which the
resistance should be based.
'close'
'high'
'low'
'open'
mode : string
Defines how the support/resistance will
be drawn
'starttoend' : (x0,x1)
'fromstart' : (x0,date)
'toend' : (date,x1)
text : string
If passed, then an annotation will be added
to the resistance (at mid point)
kwargs:
from_strfmt : string
Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
d={'kind':'resistance','date':date,'mode':mode,'on':on,'text':text}
d.update(**kwargs)
self.trendlines.append(d)
def add_annotations(self,annotations,**kwargs):
"""
Add an annotation to the QuantFigure.
Parameters:
annotations : dict or list(dict,)
Annotations can be in the form of
{'date' : 'text'}
and the text will automatically be placed at the
right level on the chart
or
A Plotly fully defined annotation
kwargs :
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters.
"""
ann_kwargs=utils.check_kwargs(kwargs,get_annotation_kwargs(),{},clean_origin=True)
if type(annotations)==list:
self.layout['annotations']['values'].extend(annotations)
else:
self.layout['annotations']['values'].append(annotations)
if ann_kwargs:
self.layout['annotations']['params'].update(**ann_kwargs)
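# Illustrative usage sketch (not part of the original source), assuming `qf` is an
# instance of this class. The short {'date': 'text'} form and a fully specified
# Plotly annotation dict can both be passed:
#   qf.add_annotations({'2016-02-01': 'earnings call'}, fontcolor='grey', fontsize=10)
#   qf.add_annotations([{'x': '2016-03-15', 'y': 0.95, 'xref': 'x', 'yref': 'paper',
#                        'text': 'split', 'showarrow': False}])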
def add_shapes(self,**kwargs):
"""
Add a shape to the QuantFigure.
kwargs :
hline : int, list or dict
Draws a horizontal line at the
indicated y position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
vline : int, list or dict
Draws a vertical line at the
indicated x position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
hspan : (y0,y1)
Draws a horizontal rectangle at the
indicated (y0,y1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
vspan : (x0,x1)
Draws a vertical rectangle at the
indicated (x0,x1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
shapes : dict or list(dict)
List of dictionaries with the
specifications of a given shape.
See help(cufflinks.tools.get_shape)
for more information
"""
kwargs=utils.check_kwargs(kwargs,get_shapes_kwargs(),{},clean_origin=True)
for k,v in list(kwargs.items()):
if k in self.layout['shapes']:
if utils.is_list(v):
self.layout['shapes'][k].extend(v)
else:
self.layout['shapes'][k].append(v)
else:
self.layout['shapes'][k]=utils.make_list(v)
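# Illustrative usage sketch (not part of the original source), assuming `qf` is an
# instance of this class:
#   qf.add_shapes(hline=150)                          # one horizontal line
#   qf.add_shapes(vline=['2016-02-01', '2016-04-01']) # two vertical lines
#   qf.add_shapes(hspan={'y0': 140, 'y1': 160})       # horizontal band
# The values accumulate in self.layout['shapes'] and are merged into the Plotly
# layout when the figure is built.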
# def add_study(self,name,params={}):
# if 'kind' in params:
# if params['kind'] in self._valid_studies:
# self.studies[name]=params
# else:
# raise Exception('Invalid study: {0}'.format(params['kind']))
# else:
# raise Exception('Study kind required')
def _add_study(self,study):
"""
Adds a study to QuantFigure.studies
Parameters:
study : dict
{'kind':study_kind,
'params':study_parameters,
'display':display_parameters}
"""
str='{study} {name}({period})' if study['params'].get('str',None)==None else study['params']['str']
study['params']['str']=str
if not study['name']:
study['name']=ta.get_column_name(study['kind'].upper(),study=study['kind'],
str=str,
period=study['params'].get('periods',None),
column=study['params'].get('column',None))
restore=study['display'].pop('restore',False)
if restore:
_=self.studies.pop(study['kind'],None)
if study['kind'] in self.studies:
try:
id='{0} ({1})'.format(study['kind'],study['params']['periods'])
except:
id='{0} ({1})'.format(study['kind'],'(2)')
else:
id=study['kind']
_id=id
n=1
while id in self.studies:
id='{0} ({1})'.format(_id,n)
n+=1
self.studies[id]=study
def add_volume(self,colorchange=True,column=None,name='',str='{name}',**kwargs):
"""
Add 'volume' study to QuantFigure.studies
Parameters:
colorchange : bool
If True then each volume bar will have a fill color
depending on whether 'base' had a positive or negative
change compared to the previous value
If False then each volume bar will have a fill color
depending on whether the volume data itself had a positive or negative
change compared to the previous value
column :string
Defines the data column name that contains the volume data.
Default: 'volume'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs :
base : string
Defines the column which will define the
positive/negative changes (if colorchange=True).
Default = 'close'
up_color : string
Color for positive bars
down_color : string
Color for negative bars
"""
if not column:
column=self._d['volume']
up_color=kwargs.pop('up_color',self.theme['up_color'])
down_color=kwargs.pop('down_color',self.theme['down_color'])
study={'kind':'volume',
'name':name,
'params':{'colorchange':colorchange,'base':'close','column':column,
'str':None},
'display':utils.merge_dict({'up_color':up_color,'down_color':down_color},kwargs)}
self._add_study(study)
def add_macd(self,fast_period=12,slow_period=26,signal_period=9,column=None,
name='',str=None,**kwargs):
"""
Add Moving Average Convergence Divergence (MACD) study to QuantFigure.studies
Parameters:
fast_period : int
MACD Fast Period
slow_period : int
MACD Slow Period
signal_period : int
MACD Signal Period
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
study={'kind':'macd',
'name':name,
'params':{'fast_period':fast_period,'slow_period':slow_period,
'signal_period':signal_period,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':False,'colors':['blue','red']},kwargs)}
study['params']['periods']='[{0},{1},{2}]'.format(fast_period,slow_period,signal_period)
self._add_study(study)
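# Illustrative usage sketch (not part of the original source), assuming `qf` is an
# instance of this class. Studies registered this way are rendered either on the
# price panel or in extra panels below it when the figure is built:
#   qf.add_volume()
#   qf.add_macd(fast_period=12, slow_period=26, signal_period=9)
#   qf.add_rsi(periods=14, rsi_upper=70, rsi_lower=30)
#   qf.iplot()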
def add_sma(self,periods=20,column=None,name='',
str=None,**kwargs):
"""
Add Simple Moving Average (SMA) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
study={'kind':'sma',
'name':name,
'params':{'periods':periods,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def add_rsi(self,periods=20,rsi_upper=70,rsi_lower=30,showbands=True,column=None,
name='',str=None,**kwargs):
"""
Add Relative Strength Indicator (RSI) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
rsi_upper : int
bounds [0,100]
Upper (overbought) level
rsi_lower : int
bounds [0,100]
Lower (oversold) level
showbands : boolean
If True, then the rsi_upper and
rsi_lower levels are displayed
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
str=str if str else '{name}({column},{period})'
study={'kind':'rsi',
'name':name,
'params':{'periods':periods,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':True,'rsi_upper':rsi_upper,
'rsi_lower':rsi_lower,'showbands':showbands},kwargs)}
self._add_study(study)
def add_bollinger_bands(self,periods=20,boll_std=2,fill=True,column=None,name='',
str='{name}({column},{period})',**kwargs):
"""
Add Bollinger Bands (BOLL) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
boll_std : int
Number of standard deviations for
the bollinger upper and lower bands
fill : boolean
If True, then the inner area of the
bands will be filled
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
fillcolor : string
Color to be used for the fill color.
Example:
'rgba(62, 111, 176, .4)'
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
study={'kind':'boll',
'name':name,
'params':{'periods':periods,'boll_std':boll_std,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':True,'fill':fill},kwargs)}
self._add_study(study)
def add_ema(self,periods=20,column=None,str=None,
name='',**kwargs):
"""
Add Exponential Moving Average (EMA) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
study={'kind':'ema',
'name':name,
'params':{'periods':periods,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def add_cci(self,periods=14,cci_upper=100,cci_lower=-100,
showbands=True,str=None,name='',**kwargs):
"""
Add Commodity Channel Index (CCI) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
cci_upper : int
Upper bands level
default : 100
cci_lower : int
Lower band level
default : -100
showbands : boolean
If True, then the cci_upper and
cci_lower levels are displayed
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
study={'kind':'cci',
'name':name,
'params':{'periods':periods,'high':self._d['high'],'low':self._d['low'],'close':self._d['close'],
'str':str},
'display':utils.merge_dict({'legendgroup':True,'cci_upper':cci_upper,
'cci_lower':cci_lower,'showbands':showbands},kwargs)}
self._add_study(study)
def add_adx(self,periods=14,str=None,name='',**kwargs):
"""
Add Average Directional Index (ADX) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
study={'kind':'adx',
'name':name,
'params':{'periods':periods,'high':self._d['high'],'low':self._d['low'],'close':self._d['close'],
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def add_ptps(self,periods=14,af=0.2,initial='long',str=None,name='',**kwargs):
"""
Add Parabolic SAR (PTPS) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
af : float
acceleration factor
initial : 'long' or 'short'
Initial position
default: long
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
study={'kind':'ptps',
'name':name,
'params':{'periods':periods,'high':self._d['high'],'low':self._d['low'],'af':af,'initial':initial,
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def add_atr(self,periods=14,str=None,name='',**kwargs):
"""
Add Average True Range (ATR) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
study={'kind':'atr',
'name':name,
'params':{'periods':periods,'high':self._d['high'],'low':self._d['low'],'close':self._d['close'],
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def add_dmi(self,periods=14,str='{name}({period})',
name='',**kwargs):
"""
Add Directional Movement Index (DMI) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
study={'kind':'dmi',
'name':name,
'params':{'periods':periods,'high':self._d['high'],'low':self._d['low'],'close':self._d['close'],
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study)
def _get_study_figure(self,study_id,**kwargs):
study=copy.deepcopy(self.studies[study_id])
kind=study['kind']
display=study['display']
display['theme']=display.get('theme',self.theme['theme'])
params=study['params']
name=study['name']
params.update(include=False)
local_kwargs={}
_slice=kwargs.pop('slice',self.data.get('slice',(None,None)))
_resample=kwargs.pop('resample',self.data.get('resample',None))
df=self._get_sliced(_slice).copy()
if _resample:
if utils.is_list(_resample):
df=self._get_resampled(*_resample,df=df)
elif utils.is_dict(_resample):
_resample.update(df=df)
df=self._get_resampled(**_resample)
else:
df=self._get_resampled(_resample,df=df)
def get_params(locals_list,params,display,append_study=True):
locals_list.append('legendgroup')
local_kwargs=utils.check_kwargs(display,locals_list,{},True)
display.update(kwargs)
if append_study:
display=dict([('study_'+k,v) for k,v in display.items()])
params.update(display)
return local_kwargs,params
if kind=='volume':
bar_colors=[]
local_kwargs,params=get_params([],params,display,False)
#Fix for 152
base_column=params['base'] if params['colorchange'] else 'volume'
base=df[self._d[base_column]]
up_color=colors.normalize(display['up_color']) if 'rgba' not in display['up_color'] else display['up_color']
down_color=colors.normalize(display['down_color']) if 'rgba' not in display['down_color'] else display['down_color']
study_kwargs=utils.kwargs_from_keyword(kwargs,{},'study')
for i in range(len(base)):
if i != 0:
if base[i] > base[i-1]:
bar_colors.append(up_color)
else:
bar_colors.append(down_color)
else:
bar_colors.append(down_color)
fig=df[params['column']].figure(kind='bar',theme=params['theme'],**kwargs)
fig['data'][0].update(marker=dict(color=bar_colors,line=dict(color=bar_colors)),
opacity=0.8)
if kind in ('sma','ema','atr','adx','dmi','ptps'):
local_kwargs,params=get_params([],params,display)
fig=df.ta_figure(study=kind,**params)
if kind=='boll':
local_kwargs,params=get_params(['fill','fillcolor'],params,display)
fig=df.ta_figure(study=kind,**params)
if local_kwargs['fill']:
fillcolor=local_kwargs.pop('fillcolor',fig['data'][2]['line']['color'] or 'rgba(200,200,200,.1)')
fillcolor=colors.to_rgba(fillcolor,.1)
fig['data'][2].update(fill='tonexty',fillcolor=fillcolor)
if kind=='rsi':
locals_list=['rsi_lower','rsi_upper','showbands']
local_kwargs,params=get_params(locals_list,params,display)
fig=df.ta_figure(study=kind,**params)
# del fig.layout['shapes']
# if local_kwargs['showbands']:
# up_color=kwargs.get('up_color',self.theme['up_color'])
# down_color=kwargs.get('down_color',self.theme['down_color'])
# for _ in ('rsi_lower','rsi_upper'):
# trace=fig.data[0].copy()
# trace.update(y=[local_kwargs[_] for x in trace['x']])
# trace.update(name='')
# color=down_color if 'lower' in _ else up_color
# trace.update(line=dict(color=color,width=1))
# fig.data.append(trace)
if kind=='cci':
locals_list=['cci_lower','cci_upper','showbands']
local_kwargs,params=get_params(locals_list,params,display)
fig=df.ta_figure(study=kind,**params)
# del fig.layout['shapes']
# if local_kwargs['showbands']:
# up_color=kwargs.get('up_color',self.theme['up_color'])
# down_color=kwargs.get('down_color',self.theme['down_color'])
# for _ in ('cci_lower','cci_upper'):
# trace=fig.data[0].copy()
# trace.update(y=[local_kwargs[_] for x in trace['x']])
# trace.update(name='')
# color=down_color if 'lower' in _ else up_color
# trace.update(line=dict(color=color,width=1))
# fig.data.append(trace)
if kind=='macd':
local_kwargs,params=get_params([],params,display)
fig=df.ta_figure(study=kind,**params)
if local_kwargs.get('legendgroup',False):
for trace in fig['data']:
trace['legendgroup'] = name
trace['showlegend'] = False
fig['data'][0].update(showlegend=True,name=name)
## Has Bands
if kind in ('rsi','cci'):
fig=tools.fig_to_dict(fig)
_upper='{0}_upper'.format(kind)
_lower='{0}_lower'.format(kind)
del fig['layout']['shapes']
if local_kwargs['showbands']:
up_color=kwargs.get('up_color',self.theme['up_color'])
down_color=kwargs.get('down_color',self.theme['down_color'])
for _ in (_lower,_upper):
trace=copy.deepcopy(fig['data'][0])
trace.update(y=[local_kwargs[_] for x in trace['x']])
trace.update(name='')
color=down_color if 'lower' in _ else up_color
trace.update(line=dict(color=color,width=1))
fig['data'].append(trace)
return fig
def iplot(self,**kwargs):
__QUANT_FIGURE_EXPORT = ['asFigure','asUrl','asImage','asPlot','display_image','validate',
'sharing','online','filename','dimensions']
layout=copy.deepcopy(self.layout)
data=copy.deepcopy(self.data)
self_kwargs=copy.deepcopy(self.kwargs)
data['slice']=kwargs.pop('slice',data.pop('slice',(None,None)))
data['resample']=kwargs.pop('resample',data.pop('resample',None))
asFigure=kwargs.pop('asFigure',False)
showstudies=kwargs.pop('showstudies',True)
study_kwargs=utils.kwargs_from_keyword(kwargs,{},'study',True)
datalegend=kwargs.pop('datalegend',data.pop('datalegend',data.pop('showlegend',True)))
export_kwargs = utils.check_kwargs(kwargs,__QUANT_FIGURE_EXPORT)
_slice=data.pop('slice')
_resample=data.pop('resample')
panel_data={}
for k in ['min_panel_size','spacing','top_margin','bottom_margin']:
panel_data[k]=kwargs.pop(k,self.panels[k])
d=self_kwargs
df=self._get_sliced(_slice).copy()
if _resample:
if utils.is_list(_resample):
df=self._get_resampled(*_resample,df=df)
elif utils.is_dict(_resample):
_resample.update(df=df)
df=self._get_resampled(**_resample)
else:
df=self._get_resampled(_resample,df=df)
annotations=layout.pop('annotations')
shapes=layout.pop('shapes')
if 'shapes' not in shapes:
shapes['shapes']=[]
for trend in self.trendlines:
_trend=self._get_trendline(**trend)
shapes['shapes'].append(_trend['shape'])
if 'text' in _trend['annotation']:
annotations['values'].append(_trend['annotation'])
shape_kwargs=utils.check_kwargs(kwargs,get_shapes_kwargs(),{},clean_origin=True)
for k,v in list(shape_kwargs.items()):
if k in shapes:
if isinstance(v,list):
shapes[k].extend(v)
else:
shapes[k].append(v)
else:
shapes[k]=[v]
for _ in [data,layout, self._d,
self.theme,{'annotations':annotations['values']},
annotations['params'],shapes]:
if _:
d=utils.merge_dict(d,_)
d=utils.deep_update(d,kwargs)
d=tools.updateColors(d)
fig = df.figure(**d)
if d['kind'] not in ('candle','candlestick','ohlc'):
tools._move_axis(fig, yaxis='y2') # FIXME TKP
pass
else:
if not datalegend:
fig['data'][0]['decreasing'].update(showlegend=False)
fig['data'][0]['increasing'].update(showlegend=False)
## 126 Shapes in wrong axis
for shape in fig['layout']['shapes']:
if 'yref' in shape:
if len(shape['yref'])==1: # not an explicit yref
shape.update(yref='y2')
panel_data['n']=1
which = [x['yaxis'] for x in fig['data']]
which.sort()
max_panel=int(which[-1][1:])
figures=[]
if showstudies:
kwargs=utils.check_kwargs(kwargs,['theme','up_color','down_color'],{},False)
kwargs.update(**study_kwargs)
kwargs.update(slice=_slice,resample=_resample)
for k,v in list(self.studies.items()):
study_fig=self._get_study_figure(k,**kwargs)
study_fig=tools.fig_to_dict(study_fig)
if 'yaxis' in study_fig['layout']:
study_fig['layout']['yaxis1']=study_fig['layout']['yaxis'].copy()
del study_fig['layout']['yaxis']
if v['kind'] in ('boll','sma','ema','ptps'):
tools._move_axis(study_fig, yaxis='y2') # FIXME TKP
pass
if v['kind'] in ('rsi','volume','macd','atr','adx','cci','dmi'):
max_panel+=1
panel_data['n']+=1
tools._move_axis(study_fig, yaxis='y{0}'.format(max_panel)) # FIXME TKP
figures.append(study_fig)
figures.append(fig)
fig=tools.merge_figures(figures)
try:
fig['layout']['xaxis1']['anchor']='y2'
except:
fig['layout']['xaxis']['anchor']='y2'
domains=self._panel_domains(**panel_data)
try:
for k,v in list(domains.items()):
fig['layout'][k].update(v)
except:
fig['layout'].update(**domains)
if not d.get('rangeslider',False):
try:
del fig['layout']['yaxis1']
except:
pass
if asFigure:
return go.Figure(fig)
else:
return pt_iplot(fig, **export_kwargs)
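# Illustrative usage sketch (not part of the original source), assuming `qf` is an
# instance of this class. Slicing and resampling are applied per call and do not
# mutate the underlying DataFrame:
#   qf.iplot(slice=('2016-01-01', '2016-06-30'), resample='W')
#   fig = qf.iplot(asFigure=True)  # returns a plotly Figure instead of rendering it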
def __getitem__(self,key):
return self.__dict__[key]
def __repr__(self):
_d=self.__dict__.copy()
del _d['df']
return json.dumps(_d,sort_keys=True, indent=4)
| mit |
frdb194/django | django/template/response.py | 84 | 5682 | from django.http import HttpResponse
from django.utils import six
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status, charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, six.string_types):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
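# Illustrative usage sketch (not part of the original source): a typical flow in a
# view or middleware. "hello.html", the context and `logger` are assumed to exist.
#   def log_length(resp):
#       logger.debug("rendered %d bytes", len(resp.content))
#   response = TemplateResponse(request, "hello.html", {"name": "world"})
#   response.add_post_render_callback(log_length)
#   response.render()          # bakes rendered_content into response.content
#   body = response.content    # raises ContentNotRenderedError if used before render()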
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super(TemplateResponse, self).__init__(
template, context, content_type, status, charset, using)
self._request = request
| bsd-3-clause |
rohanp/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 102 | 5177 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
dnanexus/dx-toolkit | src/python/dxpy/bindings/dxdataobject_functions.py | 1 | 10035 | # Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
These functions provide shorthand functional interfaces for actions such
as getting a :class:`~dxpy.bindings.DXDataObject` handler from an ID or
`link
<https://documentation.dnanexus.com/developer/api/data-object-lifecycle/details-and-links#linking>`_,
or creating a link from a handler. In addition, there are functions for
performing simple actions with an ID or link as input without creating a
full object handler.
'''
from __future__ import print_function, unicode_literals, division, absolute_import
import dxpy
from . import DXDataObject
from . import __dict__ as all_bindings
from ..exceptions import DXError
from ..compat import basestring
def dxlink(object_id, project_id=None, field=None):
'''
:param object_id: Object ID or the object handler itself
:type object_id: string or :class:`~dxpy.bindings.DXDataObject`
:param project_id: A project ID, if creating a cross-project DXLink
:type project_id: string
:param field: A field name, if creating a job-based object reference
:type field: string
:returns: A dict formatted as a symbolic DNAnexus object reference
:rtype: dict
Creates a DXLink to the specified object.
If `object_id` is already a link, it is returned without modification.
If `object_id` is a :class:`~dxpy.bindings.DXDataObject`, the object ID is
retrieved via its `get_id()` method.
If `field` is not `None`, `object_id` is expected to be of class 'job'
and the link created is a Job Based Object Reference (JBOR), which is
of the form::
{'$dnanexus_link': {'job': object_id, 'field': field}}
If `field` is `None` and `project_id` is not `None`, the link created
is a project-specific link of the form::
{'$dnanexus_link': {'project': project_id, 'id': object_id}}
'''
if is_dxlink(object_id):
return object_id
if isinstance(object_id, DXDataObject):
object_id = object_id.get_id()
if not any((project_id, field)):
return {'$dnanexus_link': object_id}
elif field:
dxpy.verify_string_dxid(object_id, "job")
return {'$dnanexus_link': {'job': object_id, 'field': field}}
else:
return {'$dnanexus_link': {'project': project_id, 'id': object_id}}
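# Illustrative sketch (not part of the original source) of the three shapes produced
# above; the IDs are abbreviated placeholders, and the job form would additionally
# need a real 24-character job ID to pass verify_string_dxid:
#   dxlink("file-xxxx") -> {'$dnanexus_link': 'file-xxxx'}
#   dxlink("file-xxxx", "project-yyyy") -> {'$dnanexus_link': {'project': 'project-yyyy', 'id': 'file-xxxx'}}
#   dxlink("job-zzzz", field="out_bam") -> {'$dnanexus_link': {'job': 'job-zzzz', 'field': 'out_bam'}}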
def is_dxlink(x):
'''
:param x: A potential DNAnexus link
Returns whether *x* appears to be a DNAnexus link (is a dict with
key ``"$dnanexus_link"``) with a referenced data object.
'''
if not isinstance(x, dict):
return False
if '$dnanexus_link' not in x:
return False
link = x['$dnanexus_link']
if isinstance(link, basestring):
return True
elif isinstance(link, dict):
return any(key in link for key in ('id', 'job'))
return False
def get_dxlink_ids(link):
'''
:param link: A DNAnexus link
:type link: dict
:returns: (Object ID, Project ID) if the link is to a data object (or :const:`None`
if no project specified in the link), or (Job ID, Field) if the link is
a job-based object reference (JBOR).
:rtype: tuple
Get the object ID and detail from a link. There are three types of links:
* Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns
``("file-XXXX", None)``.
* Data object link of the form ``{"$dnanexus_link': {"id": "file-XXXX",
"project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``.
* Job-based object reference (JBOR) of the form ``{"$dnanexus_link":
{"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``.
'''
if not is_dxlink(link):
raise DXError('Invalid link: %r' % link)
if isinstance(link['$dnanexus_link'], basestring):
return link['$dnanexus_link'], None
elif 'id' in link['$dnanexus_link']:
return link['$dnanexus_link']['id'], link['$dnanexus_link'].get('project')
else:
return link['$dnanexus_link']['job'], link['$dnanexus_link']['field']
def _guess_link_target_type(id_or_link):
# Get the object ID if the input is a link
object_id = get_dxlink_ids(id_or_link)[0] if is_dxlink(id_or_link) else id_or_link
class_name = 'DX' + object_id.split("-", 1)[0].capitalize()
if class_name not in all_bindings:
class_name = {
'DXGlobalworkflow': 'DXGlobalWorkflow'
}.get(class_name)
if class_name not in all_bindings:
raise DXError("Invalid class name: %s", class_name)
cls = all_bindings[class_name]
return cls
def get_handler(id_or_link, project=None):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
:type id_or_link: string or dict
:param project: String project ID to use as the context if the object is a data object
:type project: string
:rtype: :class:`~dxpy.bindings.DXObject`, :class:`~dxpy.bindings.DXApp`, or :class:`~dxpy.bindings.DXGlobalWorkflow`
Parses a string or DXLink dict. Creates and returns an object handler for it.
Example::
get_handler("file-1234")
'''
try:
cls = _guess_link_target_type(id_or_link)
except Exception as e:
raise DXError("Could not parse link {}: {}".format(id_or_link, e))
if cls in [dxpy.DXApp, dxpy.DXGlobalWorkflow]:
# This special case should translate identifiers of the form
# "app-name" or "app-name/version_or_tag" to the appropriate
# arguments
if dxpy.utils.resolver.is_hashid(id_or_link):
return cls(id_or_link)
else:
slash_pos = id_or_link.find('/')
dash_pos = id_or_link.find('-')
if slash_pos == -1:
return cls(name=id_or_link[dash_pos+1:])
else:
return cls(name=id_or_link[dash_pos+1:slash_pos],
alias=id_or_link[slash_pos + 1:])
elif project is None or cls in [dxpy.DXJob, dxpy.DXAnalysis, dxpy.DXProject, dxpy.DXContainer]:
# This case is important for the handlers which do not
# take a project field
return cls(id_or_link)
else:
return cls(id_or_link, project=project)
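# Illustrative sketch (not part of the original source): the handler class is chosen
# from the ID prefix, so no explicit class needs to be named. IDs are placeholders.
#   get_handler("file-xxxx")        # -> dxpy.DXFile
#   get_handler("record-xxxx")      # -> dxpy.DXRecord
#   get_handler("app-myapp/1.0.0")  # -> dxpy.DXApp resolved by name and version/tag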
def describe(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink,
or a list of object IDs or dicts containing a DXLink.
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.describe` on the object.
Example::
describe("file-1234")
Given a list of object IDs, calls :meth:`~dxpy.api.system_describe_data_objects`.
Example::
describe(["file-1234", "workflow-5678"])
Note: If id_or_link is a list and **kwargs contains a "fields" parameter, these
fields will be returned in the response for each data object in addition to the
fields included by default. Additionally, describe options can be provided for
each data object class in the "classDescribeOptions" kwargs argument. See
https://documentation.dnanexus.com/developer/api/system-methods#api-method-system-describedataobjects
for input parameters used with the multiple object describe method.
'''
# If this is a list, extract the ids.
# TODO: modify the procedure to use project ID when possible
if isinstance(id_or_link, basestring) or is_dxlink(id_or_link):
handler = get_handler(id_or_link)
return handler.describe(**kwargs)
else:
links = []
for link in id_or_link:
# If this entry is a dxlink, then get the id.
if is_dxlink(link):
# Guaranteed by is_dxlink that one of the following will work
if isinstance(link['$dnanexus_link'], basestring):
link = link['$dnanexus_link']
else:
link = link['$dnanexus_link']['id']
links.append(link)
# Prepare input to system_describe_data_objects, the same fields will be passed
# for all data object classes; if a class doesn't include a field in its describe
# output, it will be ignored
describe_input = \
dict([(field, True) for field in kwargs['fields']]) if kwargs.get('fields', []) else True
describe_links_input = [{'id': link, 'describe': describe_input} for link in links]
bulk_describe_input = {'objects': describe_links_input}
if 'classDescribeOptions' in kwargs:
bulk_describe_input['classDescribeOptions'] = kwargs['classDescribeOptions']
data_object_descriptions = dxpy.api.system_describe_data_objects(bulk_describe_input)
return [desc['describe'] for desc in data_object_descriptions['results']]
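# Illustrative sketch (not part of the original source): a bulk describe over mixed
# object classes, requesting specific fields. IDs are placeholders.
#   descs = describe(["file-xxxx", "workflow-yyyy"], fields={"name", "properties"})
#   names = [d.get("name") for d in descs]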
def get_details(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.get_details` on the object.
Example::
get_details("file-1234")
'''
handler = get_handler(id_or_link)
return handler.get_details(**kwargs)
def remove(id_or_link, **kwargs):
'''
:param id_or_link: String containing an object ID or dict containing a DXLink
Given an object ID, calls :meth:`~dxpy.bindings.DXDataObject.remove` on the object.
Example::
remove("file-1234")
'''
handler = get_handler(id_or_link)
return handler.remove(**kwargs)
| apache-2.0 |
moonboots/tensorflow | tensorflow/python/ops/nn_grad.py | 4 | 12062 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(
grad, array_ops.shape(op.inputs[1]), op.inputs[2],
op.get_attr("strides"), op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"), op.get_attr("data_format")),
nn_ops.conv2d(
grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax -
array_ops.reshape(math_ops.reduce_sum(grad_softmax * softmax, [1]),
[-1, 1]))
* softmax)
return grad_x
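# Quick numeric check of the identity above (illustrative, not part of the original
# source): for one row, the Jacobian-vector product (diag(s) - s s^T) g equals
# (g - sum(g * s)) * s.
#   import numpy as np
#   x, g = np.random.randn(4), np.random.randn(4)
#   s = np.exp(x) / np.exp(x).sum()
#   jvp = (np.diag(s) - np.outer(s, s)).dot(g)
#   assert np.allclose(jvp, (g - (g * s).sum()) * s)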
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
data_format=data_format))
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
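# Illustrative numpy analogue (not part of the original source): expanding vec to
# [D0, 1] lets broadcasting scale every row i of mat by vec[i].
#   import numpy as np
#   vec, mat = np.array([1., 2.]), np.ones((2, 3))
#   assert np.array_equal(vec[:, None] * mat, [[1., 1., 1.], [2., 2., 2.]])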
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad,
op.get_attr("strides"), op.get_attr("padding")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0], array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"), op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")
)
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")
)
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
A list of two tensors, the first being the gradient w.r.t to the input and
TopK, and the second being the gradient w.r.t. to the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [array_ops.reshape(
sparse_ops.sparse_to_dense(ind,
array_ops.reshape(
math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False),
in_shape), array_ops.zeros(
[1],
dtype=dtypes.int32)]
| apache-2.0 |
stefanfoulis/django-cms | cms/south_migrations/0028_limit_visibility_in_menu_step1of3.py | 1680 | 20032 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
darknao/btClubSportWheel | tools/arm/arm-none-eabi/share/gdb/python/gdb/command/pretty_printers.py | 137 | 14474 | # Pretty-printer commands.
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with pretty-printers."""
import copy
import gdb
import re
def parse_printer_regexps(arg):
"""Internal utility to parse a pretty-printer command argv.
Arguments:
arg: The arguments to the command. The format is:
[object-regexp [name-regexp]].
Individual printers in a collection are named as
printer-name;subprinter-name.
Returns:
The result is a 3-tuple of compiled regular expressions, except that
the resulting compiled subprinter regexp is None if not provided.
Raises:
SyntaxError: an error processing ARG
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
object_regexp = "" # match everything
name_regexp = "" # match everything
subname_regexp = None
if argc > 3:
raise SyntaxError("too many arguments")
if argc >= 1:
object_regexp = argv[0]
if argc >= 2:
name_subname = argv[1].split(";", 1)
name_regexp = name_subname[0]
if len(name_subname) == 2:
subname_regexp = name_subname[1]
# That re.compile raises SyntaxError was determined empirically.
# We catch it and reraise it to provide a slightly more useful
# error message for the user.
try:
object_re = re.compile(object_regexp)
except SyntaxError:
raise SyntaxError("invalid object regexp: %s" % object_regexp)
try:
name_re = re.compile (name_regexp)
except SyntaxError:
raise SyntaxError("invalid name regexp: %s" % name_regexp)
if subname_regexp is not None:
try:
subname_re = re.compile(subname_regexp)
except SyntaxError:
raise SyntaxError("invalid subname regexp: %s" % subname_regexp)
else:
subname_re = None
    return (object_re, name_re, subname_re)
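# For illustration (the argument string is hypothetical):
#   parse_printer_regexps("global libstdc++.*;vector")
# returns re.compile("global"), re.compile("libstdc++.*") and
# re.compile("vector"), i.e. it selects only the "vector" subprinter of
# globally registered printers whose name matches "libstdc++.*".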
def printer_enabled_p(printer):
"""Internal utility to see if printer (or subprinter) is enabled."""
if hasattr(printer, "enabled"):
return printer.enabled
else:
return True
class InfoPrettyPrinter(gdb.Command):
"""GDB command to list all registered pretty-printers.
Usage: info pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to list.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__ (self):
super(InfoPrettyPrinter, self).__init__("info pretty-printer",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(printer):
"""Return "" if PRINTER is enabled, otherwise " [disabled]"."""
if printer_enabled_p(printer):
return ""
else:
return " [disabled]"
@staticmethod
def printer_name(printer):
"""Return the printer's name."""
if hasattr(printer, "name"):
return printer.name
if hasattr(printer, "__name__"):
return printer.__name__
# This "shouldn't happen", but the public API allows for
# direct additions to the pretty-printer list, and we shouldn't
# crash because someone added a bogus printer.
# Plus we want to give the user a way to list unknown printers.
return "unknown"
def list_pretty_printers(self, pretty_printers, name_re, subname_re):
"""Print a list of pretty-printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_pretty_printers = sorted (copy.copy(pretty_printers),
key = self.printer_name)
for printer in sorted_pretty_printers:
name = self.printer_name(printer)
enabled = self.enabled_string(printer)
if name_re.match(name):
print (" %s%s" % (name, enabled))
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
sorted_subprinters = sorted (copy.copy(printer.subprinters),
key = self.printer_name)
for subprinter in sorted_subprinters:
if (not subname_re or
subname_re.match(subprinter.name)):
print (" %s%s" %
(subprinter.name,
self.enabled_string(subprinter)))
def invoke1(self, title, printer_list,
obj_name_to_match, object_re, name_re, subname_re):
"""Subroutine of invoke to simplify it."""
if printer_list and object_re.match(obj_name_to_match):
print (title)
self.list_pretty_printers(printer_list, name_re, subname_re)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
self.invoke1("global pretty-printers:", gdb.pretty_printers,
"global", object_re, name_re, subname_re)
cp = gdb.current_progspace()
self.invoke1("progspace %s pretty-printers:" % cp.filename,
cp.pretty_printers, "progspace",
object_re, name_re, subname_re)
for objfile in gdb.objfiles():
self.invoke1(" objfile %s pretty-printers:" % objfile.filename,
objfile.pretty_printers, objfile.filename,
object_re, name_re, subname_re)
def count_enabled_printers(pretty_printers):
"""Return a 2-tuple of number of enabled and total printers."""
enabled = 0
total = 0
for printer in pretty_printers:
if (hasattr(printer, "subprinters")
and printer.subprinters is not None):
if printer_enabled_p(printer):
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
enabled += 1
total += len(printer.subprinters)
else:
if printer_enabled_p(printer):
enabled += 1
total += 1
return (enabled, total)
def count_all_enabled_printers():
"""Return a 2-tuble of the enabled state and total number of all printers.
This includes subprinters.
"""
enabled_count = 0
total_count = 0
(t_enabled, t_total) = count_enabled_printers(gdb.pretty_printers)
enabled_count += t_enabled
total_count += t_total
(t_enabled, t_total) = count_enabled_printers(gdb.current_progspace().pretty_printers)
enabled_count += t_enabled
total_count += t_total
for objfile in gdb.objfiles():
(t_enabled, t_total) = count_enabled_printers(objfile.pretty_printers)
enabled_count += t_enabled
total_count += t_total
return (enabled_count, total_count)
def pluralize(text, n, suffix="s"):
"""Return TEXT pluralized if N != 1."""
if n != 1:
return "%s%s" % (text, suffix)
else:
return text
def show_pretty_printer_enabled_summary():
"""Print the number of printers enabled/disabled.
We count subprinters individually.
"""
(enabled_count, total_count) = count_all_enabled_printers()
print ("%d of %d printers enabled" % (enabled_count, total_count))
def do_enable_pretty_printer_1 (pretty_printers, name_re, subname_re, flag):
"""Worker for enabling/disabling pretty-printers.
Arguments:
pretty_printers: list of pretty-printers
name_re: regular-expression object to select printers
subname_re: regular expression object to select subprinters or None
if all are affected
flag: True for Enable, False for Disable
Returns:
The number of printers affected.
This is just for informational purposes for the user.
"""
total = 0
for printer in pretty_printers:
if (hasattr(printer, "name") and name_re.match(printer.name) or
hasattr(printer, "__name__") and name_re.match(printer.__name__)):
if (hasattr(printer, "subprinters") and
printer.subprinters is not None):
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
for subprinter in printer.subprinters:
if printer_enabled_p(subprinter):
total += 1
# NOTE: We preserve individual subprinter settings.
printer.enabled = flag
else:
# NOTE: Whether this actually disables the subprinter
# depends on whether the printer's lookup function supports
# the "enable" API. We can only assume it does.
for subprinter in printer.subprinters:
if subname_re.match(subprinter.name):
# Only record printers that change state.
if (printer_enabled_p(printer) and
printer_enabled_p(subprinter) != flag):
total += 1
subprinter.enabled = flag
else:
# This printer has no subprinters.
# If the user does "disable pretty-printer .* .* foo"
# should we disable printers that don't have subprinters?
# How do we apply "foo" in this context? Since there is no
# "foo" subprinter it feels like we should skip this printer.
# There's still the issue of how to handle
# "disable pretty-printer .* .* .*", and every other variation
# that can match everything. For now punt and only support
# "disable pretty-printer .* .*" (i.e. subname is elided)
# to disable everything.
if not subname_re:
# Only record printers that change state.
if printer_enabled_p(printer) != flag:
total += 1
printer.enabled = flag
return total
def do_enable_pretty_printer (arg, flag):
"""Internal worker for enabling/disabling pretty-printers."""
(object_re, name_re, subname_re) = parse_printer_regexps(arg)
total = 0
if object_re.match("global"):
total += do_enable_pretty_printer_1(gdb.pretty_printers,
name_re, subname_re, flag)
cp = gdb.current_progspace()
if object_re.match("progspace"):
total += do_enable_pretty_printer_1(cp.pretty_printers,
name_re, subname_re, flag)
for objfile in gdb.objfiles():
if object_re.match(objfile.filename):
total += do_enable_pretty_printer_1(objfile.pretty_printers,
name_re, subname_re, flag)
if flag:
state = "enabled"
else:
state = "disabled"
print ("%d %s %s" % (total, pluralize("printer", total), state))
# Print the total list of printers currently enabled/disabled.
# This is to further assist the user in determining whether the result
# is expected. Since we use regexps to select it's useful.
show_pretty_printer_enabled_summary()
# Enable/Disable one or more pretty-printers.
#
# This is intended for use when a broken pretty-printer is shipped/installed
# and the user wants to disable that printer without disabling all the other
# printers.
#
# A useful addition would be -v (verbose) to show each printer affected.
class EnablePrettyPrinter (gdb.Command):
"""GDB command to enable the specified pretty-printer.
Usage: enable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(EnablePrettyPrinter, self).__init__("enable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, True)
class DisablePrettyPrinter (gdb.Command):
"""GDB command to disable the specified pretty-printer.
Usage: disable pretty-printer [object-regexp [name-regexp]]
OBJECT-REGEXP is a regular expression matching the objects to examine.
Objects are "global", the program space's file, and the objfiles within
that program space.
NAME-REGEXP matches the name of the pretty-printer.
Individual printers in a collection are named as
printer-name;subprinter-name.
"""
def __init__(self):
super(DisablePrettyPrinter, self).__init__("disable pretty-printer",
gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_pretty_printer(arg, False)
def register_pretty_printer_commands():
"""Call from a top level script to install the pretty-printer commands."""
InfoPrettyPrinter()
EnablePrettyPrinter()
DisablePrettyPrinter()
register_pretty_printer_commands()
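# A minimal sketch of how these commands are used from the GDB prompt once
# this module has been loaded (printer names are illustrative only):
#
#   (gdb) info pretty-printer
#   (gdb) info pretty-printer global libstdc++.*
#   (gdb) disable pretty-printer global libstdc++.*;vector
#   (gdb) enable pretty-printer global
#
# Per do_enable_pretty_printer_1 above, "disable pretty-printer .* .*"
# (subprinter name elided) is the supported way to disable everything.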
| gpl-2.0 |
garimakhulbe/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureReport/setup.py | 10 | 1137 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestreportserviceforazure"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.4.0", "msrestazure>=0.4.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestReportServiceForAzure",
author_email="",
url="",
keywords=["Swagger", "AutoRestReportServiceForAzure"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
| mit |
joshcooper/nogotofail | nogotofail/mitm/connection/handlers/data/xmpp.py | 6 | 5469 | r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from nogotofail.mitm.connection.handlers.data import handlers
from nogotofail.mitm.connection.handlers.data import ClientReportDetection
from nogotofail.mitm.connection.handlers.data import DataHandler
from nogotofail.mitm.connection.handlers.store import handler
from nogotofail.mitm.event import connection
from nogotofail.mitm import util
import re
@handler(handlers)
class XmppStartTlsStripHandler(DataHandler):
name = "xmppstarttlsstrip"
description = "Suppress STARTTLS in XMPP streams"
first_chunk_checked = False
xmpp_detected = False
starttls_feature_stripped = False
vuln_notified = False
def on_request(self, request):
return self.on_chunk_received(request)
def on_response(self, response):
return self.on_chunk_received(response)
def on_chunk_received(self, data):
if not self.first_chunk_checked:
self.first_chunk_checked = True
self.xmpp_detected = self.is_xmpp_start(data)
if self.xmpp_detected:
self.log(logging.DEBUG, "XMPP detected")
if not self.xmpp_detected:
return data
# Consider dropping TLS/SSL. However, this will likely destroy
# connectivity if STARTTLS stripping does not work.
# if data[0] == 0x16:
# self.log(logging.INFO, "Dropping TLS/SSL chunk")
# return ""
if self.starttls_feature_stripped:
# Ignore/pass through starttls, proceed, and failure messages
if (data == '<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls"/>' or
data == '<proceed xmlns="urn:ietf:params:xml:ns:xmpp-tls"/>' or
data == '<failure xmlns="urn:ietf:params:xml:ns:xmpp-tls"/>'):
return data
if not self.vuln_notified:
self.log(logging.CRITICAL, "Cleartext traffic after stripped STARTTLS")
self.log_event(
logging.ERROR,
connection.AttackEvent(
self.connection, self.name, True,
None))
self.connection.vuln_notify(util.vuln.VULN_XMPP_STARTTLS_STRIP)
self.vuln_notified = True
if not self.is_stream_features_present(data):
return data
self.log(logging.DEBUG, "XMPP stream features detected")
if not self.is_starttls_feature_present(data):
self.log(logging.WARNING, "XMPP STARTTLS feature missing")
return data
modified_data = self.strip_starttls_feature(data)
if data == modified_data:
self.log(logging.WARNING, "Failed to strip XMPP STARTTLS")
return data
self.starttls_feature_stripped = True
self.log(logging.INFO, "Stripped XMPP STARTTLS")
return modified_data
def is_xmpp_start(self, data):
return data.startswith("<stream:stream")
def is_stream_features_present(self, data):
return data.find("<stream:features") != -1
def is_starttls_feature_present(self, data):
return data.find("<starttls") != -1
def strip_starttls_feature(self, data):
start_index = data.find("<starttls")
if start_index == -1:
return data
end_index = data.find("/starttls>", start_index)
if end_index != -1:
end_index += len("/starttls>")
end_index2 = data.find(">", start_index)
if end_index2 != -1 and data[end_index2 - 1] == '/':
if end_index == -1 or end_index2 < end_index:
end_index = end_index2
return data[:start_index] + data[end_index:]
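# Illustrative effect of strip_starttls_feature (XML abbreviated, namespace
# as used elsewhere in this file):
#   input:  '<stream:features><starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls">'
#           '<required/></starttls><mechanisms .../></stream:features>'
#   output: '<stream:features><mechanisms .../></stream:features>'
# The whole <starttls> element is cut out, so the client never learns that
# the server offers STARTTLS.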
@handler.passive(handlers)
class XmppAuthHandler(DataHandler):
name = "xmppauthdetection"
description = "Detect authentication credentials in XMPP traffic"
first_chunk_checked = False
xmpp_detected = False
def on_request(self, request):
return self.on_chunk_received(request)
def on_response(self, response):
return self.on_chunk_received(response)
def on_chunk_received(self, data):
if not self.first_chunk_checked:
self.first_chunk_checked = True
self.xmpp_detected = self.is_xmpp_start(data)
if self.xmpp_detected:
self.log(logging.DEBUG, "XMPP detected")
if not self.xmpp_detected:
return data
if "<auth " in data:
self.log(
logging.CRITICAL,
"Authentication credentials in XMPP traffic")
self.log_event(
logging.ERROR,
connection.AttackEvent(
self.connection, self.name, True,
None))
self.connection.vuln_notify(util.vuln.VULN_CLEARTEXT_AUTH)
return data
def is_xmpp_start(self, data):
return data.startswith("<stream:stream")
| apache-2.0 |
BanBxda/Linux-3.0.X | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
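# Worked example (values illustrative): for opc = "blocked" the table above
# gives top = ["M", "eq", 2].  With a status digit string val = "420" and
# dat = "1", analyse() computes 420 / 10**1 = 42, then 42 % 10 = 2, i.e. it
# extracts digit 1 (counting from the least-significant end) of the M field.
# That digit equals argval 2 ("blocked"), so the function returns 1.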
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
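# A minimal illustrative test script (not part of this file); each line is
# "command: opcode: thread-id: data":
#
#   C: schedfifo:   0: 80
#   C: locknowait:  0: 0
#   W: locked:      0: 0
#   C: unlock:      0: 0
#   W: unlocked:    0: 0
#
# "C" sends a command to the in-kernel tester, "W" polls the status file
# until the condition holds, and "T" checks the condition exactly once.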
| gpl-2.0 |
sebrandon1/nova | plugins/xenserver/xenapi/etc/xapi.d/plugins/ipxe.py | 13 | 4225 | #!/usr/bin/env python
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
# TODO(sfinucan): Resolve all 'noqa' items once the above is no longer true
"""Inject network configuration into iPXE ISO for boot."""
import logging
import os
import shutil
import utils
# FIXME(sirp): should this use pluginlib from 5.6?
import pluginlib_nova
pluginlib_nova.configure_logging('ipxe')
ISOLINUX_CFG = """SAY iPXE ISO boot image
TIMEOUT 30
DEFAULT ipxe.krn
LABEL ipxe.krn
KERNEL ipxe.krn
INITRD netcfg.ipxe
"""
NETCFG_IPXE = """#!ipxe
:start
imgfree
ifclose net0
set net0/ip %(ip_address)s
set net0/netmask %(netmask)s
set net0/gateway %(gateway)s
set dns %(dns)s
ifopen net0
goto menu
:menu
chain %(boot_menu_url)s
goto boot
:boot
sanboot --no-describe --drive 0x80
"""
def _write_file(filename, data):
# If the ISO was tampered with such that the destination is a symlink,
# that could allow a malicious user to write to protected areas of the
# dom0 filesystem. /HT to comstud for pointing this out.
#
# Short-term, checking that the destination is not a symlink should be
# sufficient.
#
# Long-term, we probably want to perform all file manipulations within a
# chroot jail to be extra safe.
if os.path.islink(filename):
raise RuntimeError('SECURITY: Cannot write to symlinked destination')
logging.debug("Writing to file '%s'" % filename)
f = open(filename, 'w')
try:
f.write(data)
finally:
f.close()
def _unbundle_iso(sr_path, filename, path):
logging.debug("Unbundling ISO '%s'" % filename)
read_only_path = utils.make_staging_area(sr_path)
try:
utils.run_command(['mount', '-o', 'loop', filename, read_only_path])
try:
shutil.copytree(read_only_path, path)
finally:
utils.run_command(['umount', read_only_path])
finally:
utils.cleanup_staging_area(read_only_path)
def _create_iso(mkisofs_cmd, filename, path):
logging.debug("Creating ISO '%s'..." % filename)
orig_dir = os.getcwd()
os.chdir(path)
try:
utils.run_command([mkisofs_cmd, '-quiet', '-l', '-o', filename,
'-c', 'boot.cat', '-b', 'isolinux.bin',
'-no-emul-boot', '-boot-load-size', '4',
'-boot-info-table', '.'])
finally:
os.chdir(orig_dir)
def inject(session, sr_path, vdi_uuid, boot_menu_url, ip_address, netmask,
gateway, dns, mkisofs_cmd):
iso_filename = '%s.img' % os.path.join(sr_path, 'iso', vdi_uuid)
# Create staging area so we have a unique path but remove it since
# shutil.copytree will recreate it
staging_path = utils.make_staging_area(sr_path)
utils.cleanup_staging_area(staging_path)
try:
_unbundle_iso(sr_path, iso_filename, staging_path)
# Write Configs
_write_file(os.path.join(staging_path, 'netcfg.ipxe'),
NETCFG_IPXE % {"ip_address": ip_address,
"netmask": netmask,
"gateway": gateway,
"dns": dns,
"boot_menu_url": boot_menu_url})
_write_file(os.path.join(staging_path, 'isolinux.cfg'),
ISOLINUX_CFG)
_create_iso(mkisofs_cmd, iso_filename, staging_path)
finally:
utils.cleanup_staging_area(staging_path)
if __name__ == "__main__":
utils.register_plugin_calls(inject)
| apache-2.0 |
lociii/googleads-python-lib | examples/adspygoogle/dfa/v1_20/upload_in_stream_asset.py | 3 | 2858 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example uploads an In-Stream video asset into an existing In-Stream
video creative. To create an In-Stream video creative, run
create_in_stream_video_creative.py.
This example creates a media file in the target creative because the
'mediaFile' flag on the InStreamAssetUploadRequest was set to 'true'. You can
use the same workflow to upload companion ads or non-linear ads to your creative
by setting the 'companion' or 'nonLinear' flags instead, respectively. Only one
flag may be set per upload request.
Tags: creative.uploadInStreamAsset
"""
__author__ = '[email protected] (Joseph DiLallo)'
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
from adspygoogle.common import Utils
# Set the parameters for the In-Stream video asset.
ASSET_NAME = 'INSERT_ASSET_NAME_HERE'
PATH_TO_FILE = 'INSERT_PATH_TO_FILE_HERE'
IN_STREAM_VIDEO_CREATIVE_ID = 'INSERT_IN_STREAM_VIDEO_CREATIVE_ID_HERE'
def main(client, asset_name, path_to_file, in_stream_video_creative_id):
# Initialize appropriate service.
creative_service = client.GetCreativeService(
'https://advertisersapitest.doubleclick.net', 'v1.20')
# Convert file into format that can be sent in SOAP messages.
content = Utils.ReadFile(path_to_file)
content = base64.encodestring(content)
# Create the In-Stream video creative asset.
in_stream_video_asset = {
'name': asset_name,
'content': content,
}
# Create an upload request to make this asset a media file for an existing
# In-Stream creative.
in_stream_asset_upload_request = {
'mediaFile': 'true',
'inStreamAsset': in_stream_video_asset,
'creativeId': in_stream_video_creative_id
}
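  # Per the module docstring, the same structure can carry a companion or
  # non-linear ad instead of a media file; a hypothetical variant (only one
  # flag may be set per request) would be:
  #   companion_upload_request = {
  #       'companion': 'true',
  #       'inStreamAsset': in_stream_video_asset,
  #       'creativeId': in_stream_video_creative_id
  #   }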
# Save the media file.
result = creative_service.UploadInStreamAsset(
in_stream_asset_upload_request)[0]
# Display a success message.
print ('Added a media file to In-Stream video creative with ID \'%s\'.'
% result['Id'])
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client, ASSET_NAME, PATH_TO_FILE, IN_STREAM_VIDEO_CREATIVE_ID)
| apache-2.0 |
rebost/django | django/contrib/sitemaps/tests/http.py | 8 | 5922 | from __future__ import unicode_literals
import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'templates'),)
)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today()))
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today()))
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'templates'),)
)
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today()))
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today())
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS,
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
user_sitemap = GenericSitemap({'queryset': User.objects.all()})
def is_user(url):
return isinstance(url['item'], User)
item_in_url_info = all(map(is_user, user_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
Check that a cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
| bsd-3-clause |
leighpauls/k2cro4 | third_party/closure_linter/closure_linter/gjslint.py | 135 | 7991 | #!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import functools
import itertools
import sys
import time
import gflags as flags
from closure_linter import checker
from closure_linter import errorrecord
from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable-msg=C6204
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess', False,
'Whether to parallalize linting using the '
'multiprocessing module. Disabled by default.')
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary']
def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
for results in pool.imap(_CheckPath, paths):
for record in results:
yield record
pool.close()
pool.join()
def _CheckPaths(paths):
"""Run _CheckPath on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in paths:
results = _CheckPath(path)
for record in results:
yield record
def _CheckPath(path):
"""Check a path and return any errors.
Args:
path: paths to check.
Returns:
A list of errorrecord.ErrorRecords for any found errors.
"""
error_accumulator = erroraccumulator.ErrorAccumulator()
style_checker = checker.JavaScriptStyleChecker(error_accumulator)
style_checker.Check(path)
# Return any errors as error records.
make_error_record = functools.partial(errorrecord.MakeErrorRecord, path)
return map(make_error_record, error_accumulator.GetErrors())
def _GetFilePaths(argv):
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
  if error_count == 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if error_count or new_error_count:
print ('Found %d errors, including %d new errors, in %d files '
'(%d files OK).' % (
error_count,
new_error_count,
error_paths_count,
no_error_paths_count))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
t: A duration in seconds.
Returns:
A formatted duration string.
"""
if t < 1:
return '%dms' % round(t * 1000)
else:
return '%.2fs' % t
def main(argv = None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
if FLAGS.time:
start_time = time.time()
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
if FLAGS.multiprocess:
records_iter = _MultiprocessCheckPaths(paths)
else:
records_iter = _CheckPaths(paths)
records_iter, records_iter_copy = itertools.tee(records_iter, 2)
_PrintErrorRecords(records_iter_copy)
error_records = list(records_iter)
_PrintSummary(paths, error_records)
exit_code = 0
# If there are any errors
if error_records:
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if FLAGS.summary:
_PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
sys.stdout.write(chr(7))
# Write out instructions for using fixjsstyle script to fix some of the
# reported errors.
fix_args = []
for flag in sys.argv[1:]:
for f in GJSLINT_ONLY_FLAGS:
if flag.startswith(f):
break
else:
fix_args.append(flag)
print """
Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
if __name__ == '__main__':
main()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/identity/azure-identity/azure/identity/_credentials/environment.py | 1 | 5957 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import os
from .. import CredentialUnavailableError
from .._constants import EnvironmentVariables
from .._internal.decorators import log_get_token
from .certificate import CertificateCredential
from .client_secret import ClientSecretCredential
from .user_password import UsernamePasswordCredential
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Mapping, Optional, Union
from azure.core.credentials import AccessToken
EnvironmentCredentialTypes = Union["CertificateCredential", "ClientSecretCredential", "UsernamePasswordCredential"]
_LOGGER = logging.getLogger(__name__)
class EnvironmentCredential(object):
"""A credential configured by environment variables.
This credential is capable of authenticating as a service principal using a client secret or a certificate, or as
a user with a username and password. Configuration is attempted in this order, using these environment variables:
Service principal with secret:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_SECRET**: one of the service principal's client secrets
Service principal with certificate:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_CERTIFICATE_PATH**: path to a PEM-encoded certificate file including the private key. The
certificate must not be password-protected.
User with username and password:
- **AZURE_CLIENT_ID**: the application's client ID
- **AZURE_USERNAME**: a username (usually an email address)
- **AZURE_PASSWORD**: that user's password
- **AZURE_TENANT_ID**: (optional) ID of the service principal's tenant. Also called its 'directory' ID.
If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or
school accounts.
:keyword bool allow_multitenant_authentication: when True, enables the credential to acquire tokens from any tenant
the application or user is registered in. When False, which is the default, the credential will acquire tokens
only from the tenant specified by **AZURE_TENANT_ID**.
"""
def __init__(self, **kwargs):
# type: (Mapping[str, Any]) -> None
self._credential = None # type: Optional[EnvironmentCredentialTypes]
if all(os.environ.get(v) is not None for v in EnvironmentVariables.CLIENT_SECRET_VARS):
self._credential = ClientSecretCredential(
client_id=os.environ[EnvironmentVariables.AZURE_CLIENT_ID],
client_secret=os.environ[EnvironmentVariables.AZURE_CLIENT_SECRET],
tenant_id=os.environ[EnvironmentVariables.AZURE_TENANT_ID],
**kwargs
)
elif all(os.environ.get(v) is not None for v in EnvironmentVariables.CERT_VARS):
self._credential = CertificateCredential(
client_id=os.environ[EnvironmentVariables.AZURE_CLIENT_ID],
tenant_id=os.environ[EnvironmentVariables.AZURE_TENANT_ID],
certificate_path=os.environ[EnvironmentVariables.AZURE_CLIENT_CERTIFICATE_PATH],
**kwargs
)
elif all(os.environ.get(v) is not None for v in EnvironmentVariables.USERNAME_PASSWORD_VARS):
self._credential = UsernamePasswordCredential(
client_id=os.environ[EnvironmentVariables.AZURE_CLIENT_ID],
username=os.environ[EnvironmentVariables.AZURE_USERNAME],
password=os.environ[EnvironmentVariables.AZURE_PASSWORD],
tenant_id=os.environ.get(EnvironmentVariables.AZURE_TENANT_ID), # optional for username/password auth
**kwargs
)
if self._credential:
_LOGGER.info("Environment is configured for %s", self._credential.__class__.__name__)
else:
expected_variables = set(
EnvironmentVariables.CERT_VARS
+ EnvironmentVariables.CLIENT_SECRET_VARS
+ EnvironmentVariables.USERNAME_PASSWORD_VARS
)
set_variables = [v for v in expected_variables if v in os.environ]
if set_variables:
_LOGGER.warning(
"Incomplete environment configuration. These variables are set: %s", ", ".join(set_variables)
)
else:
_LOGGER.info("No environment configuration found.")
@log_get_token("EnvironmentCredential")
def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
is False, specifying a tenant with this argument may raise an exception.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.identity.CredentialUnavailableError: environment variable configuration is incomplete
"""
if not self._credential:
message = (
"EnvironmentCredential authentication unavailable. Environment variables are not fully configured."
)
raise CredentialUnavailableError(message=message)
return self._credential.get_token(*scopes, **kwargs)
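# Added usage sketch (not part of the upstream module): a minimal flow assuming
# the AZURE_TENANT_ID / AZURE_CLIENT_ID / AZURE_CLIENT_SECRET variables listed
# above are already exported; the scope string is an arbitrary example.
#
#     from azure.identity import EnvironmentCredential
#
#     credential = EnvironmentCredential()
#     token = credential.get_token("https://management.azure.com/.default")
#     print(token.expires_on)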
| mit |
dllsf/odootest | openerp/addons/base/module/wizard/__init__.py | 365 | 1250 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_module_update
import base_language_install
import base_import_language
import base_module_upgrade
import base_module_configuration
import base_export_language
import base_update_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
interlegis/saap | config/rest_framework/pagination.py | 1 | 30293 | # coding: utf-8
"""
Pagination serializers determine the structure of the output that should
be used for paginated responses.
"""
from __future__ import unicode_literals
from base64 import b64decode, b64encode
from collections import OrderedDict, namedtuple
from django.core.paginator import InvalidPage
from django.core.paginator import Paginator as DjangoPaginator
from django.template import loader
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import coreapi, coreschema
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import remove_query_param, replace_query_param
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
return min(ret, cutoff)
return ret
def _divide_with_ceil(a, b):
"""
Returns 'a' divided by 'b', with any remainder rounded up.
"""
if a % b:
return (a // b) + 1
return a // b
def _get_displayed_page_numbers(current, final):
"""
This utility function determines a list of page numbers to display.
This gives us a nice contextually relevant set of page numbers.
For example:
current=14, final=16 -> [1, None, 13, 14, 15, 16]
This implementation gives one page to each side of the cursor,
or two pages to the side when the cursor is at the edge, then
ensures that any breaks between non-continuous page numbers never
remove only a single page.
For an alternative implementation which gives two pages to each side of
the cursor, eg. as in GitHub issue list pagination, see:
https://gist.github.com/tomchristie/321140cebb1c4a558b15
"""
assert current >= 1
assert final >= current
if final <= 5:
return list(range(1, final + 1))
# We always include the first two pages, last two pages, and
# two pages either side of the current page.
included = {1, current - 1, current, current + 1, final}
# If the break would only exclude a single page number then we
# may as well include the page number instead of the break.
if current <= 4:
included.add(2)
included.add(3)
if current >= final - 3:
included.add(final - 1)
included.add(final - 2)
# Now sort the page numbers and drop anything outside the limits.
included = [
idx for idx in sorted(list(included))
if 0 < idx <= final
]
# Finally insert any `...` breaks
if current > 4:
included.insert(1, None)
if current < final - 3:
included.insert(len(included) - 1, None)
return included
def _get_page_links(page_numbers, current, url_func):
"""
Given a list of page numbers and `None` page breaks,
return a list of `PageLink` objects.
"""
page_links = []
for page_number in page_numbers:
if page_number is None:
page_link = PAGE_BREAK
else:
page_link = PageLink(
url=url_func(page_number),
number=page_number,
is_active=(page_number == current),
is_break=False
)
page_links.append(page_link)
return page_links
def _reverse_ordering(ordering_tuple):
"""
Given an order_by tuple such as `('-created', 'uuid')` reverse the
ordering and return a new tuple, eg. `('created', '-uuid')`.
"""
def invert(x):
return x[1:] if x.startswith('-') else '-' + x
return tuple([invert(item) for item in ordering_tuple])
Cursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])
PageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])
PAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)
class BasePagination(object):
display_page_controls = False
def paginate_queryset(self, queryset, request, view=None): # pragma: no cover
raise NotImplementedError('paginate_queryset() must be implemented.')
def get_paginated_response(self, data): # pragma: no cover
raise NotImplementedError('get_paginated_response() must be implemented.')
def to_html(self): # pragma: no cover
raise NotImplementedError('to_html() must be implemented to display page controls.')
def get_results(self, data):
return data['results']
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
return []
class PageNumberPagination(BasePagination):
"""
A simple page number based style that supports page numbers as
query parameters. For example:
http://api.example.org/accounts/?page=4
http://api.example.org/accounts/?page=4&page_size=100
"""
# The default page size.
# Defaults to `None`, meaning pagination is disabled.
page_size = api_settings.PAGE_SIZE
django_paginator_class = DjangoPaginator
# Client can control the page using this query parameter.
page_query_param = 'page'
page_query_description = _('A page number within the paginated result set.')
# Client can control the page size using this query parameter.
# Default is 'None'. Set to eg 'page_size' to enable usage.
page_size_query_param = None
page_size_query_description = _('Number of results to return per page.')
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = None
last_page_strings = ('last',)
template = 'rest_framework/pagination/numbers.html'
invalid_page_message = _('Invalid page.')
def paginate_queryset(self, queryset, request, view=None):
"""
Paginate a queryset if required, either returning a
page object, or `None` if pagination is not configured for this view.
"""
page_size = self.get_page_size(request)
if not page_size:
return None
paginator = self.django_paginator_class(queryset, page_size)
page_number = request.query_params.get(self.page_query_param, 1)
if page_number in self.last_page_strings:
page_number = paginator.num_pages
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(exc)
)
raise NotFound(msg)
if paginator.num_pages > 1 and self.template is not None:
# The browsable API should display pagination controls.
self.display_page_controls = True
self.request = request
return list(self.page)
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_page_size(self, request):
if self.page_size_query_param:
try:
return _positive_int(
request.query_params[self.page_size_query_param],
strict=True,
cutoff=self.max_page_size
)
except (KeyError, ValueError):
pass
return self.page_size
def get_next_link(self):
if not self.page.has_next():
return None
url = self.request.build_absolute_uri()
page_number = self.page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self):
if not self.page.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = self.page.previous_page_number()
if page_number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(url, self.page_query_param, page_number)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.page_query_param)
else:
return replace_query_param(base_url, self.page_query_param, page_number)
current = self.page.number
final = self.page.paginator.num_pages
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = self.get_html_context()
return template.render(context)
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
fields = [
coreapi.Field(
name=self.page_query_param,
required=False,
location='query',
schema=coreschema.Integer(
title='Page',
description=force_text(self.page_query_description)
)
)
]
if self.page_size_query_param is not None:
fields.append(
coreapi.Field(
name=self.page_size_query_param,
required=False,
location='query',
schema=coreschema.Integer(
title='Page size',
description=force_text(self.page_size_query_description)
)
)
)
return fields
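# Added configuration sketch (not upstream code): projects typically subclass
# this paginator and reference it from a view or the REST_FRAMEWORK settings.
# The class name and numeric values below are assumptions for illustration.
#
#     class StandardResultsSetPagination(PageNumberPagination):
#         page_size = 25
#         page_size_query_param = 'page_size'
#         max_page_size = 100
#
#     # e.g. in a view:  pagination_class = StandardResultsSetPagination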
class LimitOffsetPagination(BasePagination):
"""
A limit/offset based style. For example:
http://api.example.org/accounts/?limit=100
http://api.example.org/accounts/?offset=400&limit=100
"""
default_limit = api_settings.PAGE_SIZE
limit_query_param = 'limit'
limit_query_description = _('Number of results to return per page.')
offset_query_param = 'offset'
offset_query_description = _('The initial index from which to return the results.')
max_limit = None
template = 'rest_framework/pagination/numbers.html'
def paginate_queryset(self, queryset, request, view=None):
self.count = self.get_count(queryset)
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.request = request
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
if self.count == 0 or self.offset > self.count:
return []
return list(queryset[self.offset:self.offset + self.limit])
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_limit(self, request):
if self.limit_query_param:
try:
return _positive_int(
request.query_params[self.limit_query_param],
strict=True,
cutoff=self.max_limit
)
except (KeyError, ValueError):
pass
return self.default_limit
def get_offset(self, request):
try:
return _positive_int(
request.query_params[self.offset_query_param],
)
except (KeyError, ValueError):
return 0
def get_next_link(self):
if self.offset + self.limit >= self.count:
return None
url = self.request.build_absolute_uri()
url = replace_query_param(url, self.limit_query_param, self.limit)
offset = self.offset + self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_previous_link(self):
if self.offset <= 0:
return None
url = self.request.build_absolute_uri()
url = replace_query_param(url, self.limit_query_param, self.limit)
if self.offset - self.limit <= 0:
return remove_query_param(url, self.offset_query_param)
offset = self.offset - self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
if self.limit:
current = _divide_with_ceil(self.offset, self.limit) + 1
# The number of pages is a little bit fiddly.
# We need to sum both the number of pages from current offset to end
# plus the number of pages up to the current offset.
# When offset is not strictly divisible by the limit then we may
# end up introducing an extra page as an artifact.
final = (
_divide_with_ceil(self.count - self.offset, self.limit) +
_divide_with_ceil(self.offset, self.limit)
)
if final < 1:
final = 1
else:
current = 1
final = 1
if current > final:
current = final
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.offset_query_param)
else:
offset = self.offset + ((page_number - current) * self.limit)
return replace_query_param(base_url, self.offset_query_param, offset)
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = self.get_html_context()
return template.render(context)
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
return [
coreapi.Field(
name=self.limit_query_param,
required=False,
location='query',
schema=coreschema.Integer(
title='Limit',
description=force_text(self.limit_query_description)
)
),
coreapi.Field(
name=self.offset_query_param,
required=False,
location='query',
schema=coreschema.Integer(
title='Offset',
description=force_text(self.offset_query_description)
)
)
]
def get_count(self, queryset):
"""
Determine an object count, supporting either querysets or regular lists.
"""
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
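# Added sketch (not upstream code): a bounded limit/offset paginator. The
# numbers are arbitrary; with them, GET /accounts/?limit=5&offset=20 would
# return items 21-25 plus the count/next/previous links built above.
#
#     class BoundedLimitOffsetPagination(LimitOffsetPagination):
#         default_limit = 10
#         max_limit = 50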
class CursorPagination(BasePagination):
"""
The cursor pagination implementation is necessarily complex.
For an overview of the position/offset style we use, see this post:
http://cra.mr/2011/03/08/building-cursors-for-the-disqus-api
"""
cursor_query_param = 'cursor'
cursor_query_description = _('The pagination cursor value.')
page_size = api_settings.PAGE_SIZE
invalid_cursor_message = _('Invalid cursor')
ordering = '-created'
template = 'rest_framework/pagination/previous_and_next.html'
# Client can control the page size using this query parameter.
# Default is 'None'. Set to eg 'page_size' to enable usage.
page_size_query_param = None
page_size_query_description = _('Number of results to return per page.')
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = None
# The offset in the cursor is used in situations where we have a
# nearly-unique index. (Eg millisecond precision creation timestamps)
# We guard against malicious users attempting to cause expensive database
# queries, by having a hard cap on the maximum possible size of the offset.
offset_cutoff = 1000
def paginate_queryset(self, queryset, request, view=None):
self.page_size = self.get_page_size(request)
if not self.page_size:
return None
self.base_url = request.build_absolute_uri()
self.ordering = self.get_ordering(request, queryset, view)
self.cursor = self.decode_cursor(request)
if self.cursor is None:
(offset, reverse, current_position) = (0, False, None)
else:
(offset, reverse, current_position) = self.cursor
# Cursor pagination always enforces an ordering.
if reverse:
queryset = queryset.order_by(*_reverse_ordering(self.ordering))
else:
queryset = queryset.order_by(*self.ordering)
# If we have a cursor with a fixed position then filter by that.
if current_position is not None:
order = self.ordering[0]
is_reversed = order.startswith('-')
order_attr = order.lstrip('-')
# Test for: (cursor reversed) XOR (queryset reversed)
if self.cursor.reverse != is_reversed:
kwargs = {order_attr + '__lt': current_position}
else:
kwargs = {order_attr + '__gt': current_position}
queryset = queryset.filter(**kwargs)
# If we have an offset cursor then offset the entire page by that amount.
# We also always fetch an extra item in order to determine if there is a
# page following on from this one.
results = list(queryset[offset:offset + self.page_size + 1])
self.page = list(results[:self.page_size])
# Determine the position of the final item following the page.
if len(results) > len(self.page):
has_following_position = True
following_position = self._get_position_from_instance(results[-1], self.ordering)
else:
has_following_position = False
following_position = None
# If we have a reverse queryset, then the query ordering was in reverse
# so we need to reverse the items again before returning them to the user.
if reverse:
self.page = list(reversed(self.page))
if reverse:
# Determine next and previous positions for reverse cursors.
self.has_next = (current_position is not None) or (offset > 0)
self.has_previous = has_following_position
if self.has_next:
self.next_position = current_position
if self.has_previous:
self.previous_position = following_position
else:
# Determine next and previous positions for forward cursors.
self.has_next = has_following_position
self.has_previous = (current_position is not None) or (offset > 0)
if self.has_next:
self.next_position = following_position
if self.has_previous:
self.previous_position = current_position
# Display page controls in the browsable API if there is more
# than one page.
if (self.has_previous or self.has_next) and self.template is not None:
self.display_page_controls = True
return self.page
def get_page_size(self, request):
if self.page_size_query_param:
try:
return _positive_int(
request.query_params[self.page_size_query_param],
strict=True,
cutoff=self.max_page_size
)
except (KeyError, ValueError):
pass
return self.page_size
def get_next_link(self):
if not self.has_next:
return None
if self.cursor and self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[-1], self.ordering)
else:
compare = self.next_position
offset = 0
for item in reversed(self.page):
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
# The item in this position has the same position as the item
# following it, we can't use it as a marker position, so increment
# the offset and keep seeking to the previous item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_previous:
# We are on the first page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# The change in direction will introduce a paging artifact,
# where we end up skipping forward a few extra items.
offset = 0
position = self.previous_position
else:
# Use the position from the existing cursor and increment
                # its offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.previous_position
cursor = Cursor(offset=offset, reverse=False, position=position)
return self.encode_cursor(cursor)
def get_previous_link(self):
if not self.has_previous:
return None
if self.cursor and not self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[0], self.ordering)
else:
compare = self.previous_position
offset = 0
for item in self.page:
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
# The item in this position has the same position as the item
# following it, we can't use it as a marker position, so increment
# the offset and keep seeking to the previous item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_next:
# We are on the final page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# Use the position from the existing cursor and increment
                # its offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.next_position
else:
# The change in direction will introduce a paging artifact,
# where we end up skipping back a few extra items.
offset = 0
position = self.next_position
cursor = Cursor(offset=offset, reverse=True, position=position)
return self.encode_cursor(cursor)
def get_ordering(self, request, queryset, view):
"""
Return a tuple of strings, that may be used in an `order_by` method.
"""
ordering_filters = [
filter_cls for filter_cls in getattr(view, 'filter_backends', [])
if hasattr(filter_cls, 'get_ordering')
]
if ordering_filters:
# If a filter exists on the view that implements `get_ordering`
# then we defer to that filter to determine the ordering.
filter_cls = ordering_filters[0]
filter_instance = filter_cls()
ordering = filter_instance.get_ordering(request, queryset, view)
assert ordering is not None, (
'Using cursor pagination, but filter class {filter_cls} '
'returned a `None` ordering.'.format(
filter_cls=filter_cls.__name__
)
)
else:
# The default case is to check for an `ordering` attribute
# on this pagination instance.
ordering = self.ordering
assert ordering is not None, (
'Using cursor pagination, but no ordering attribute was declared '
'on the pagination class.'
)
assert '__' not in ordering, (
'Cursor pagination does not support double underscore lookups '
'for orderings. Orderings should be an unchanging, unique or '
'nearly-unique field on the model, such as "-created" or "pk".'
)
assert isinstance(ordering, (six.string_types, list, tuple)), (
'Invalid ordering. Expected string or tuple, but got {type}'.format(
type=type(ordering).__name__
)
)
if isinstance(ordering, six.string_types):
return (ordering,)
return tuple(ordering)
def decode_cursor(self, request):
"""
Given a request with a cursor, return a `Cursor` instance.
"""
# Determine if we have a cursor, and if so then decode it.
encoded = request.query_params.get(self.cursor_query_param)
if encoded is None:
return None
try:
querystring = b64decode(encoded.encode('ascii')).decode('ascii')
tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
offset = tokens.get('o', ['0'])[0]
offset = _positive_int(offset, cutoff=self.offset_cutoff)
reverse = tokens.get('r', ['0'])[0]
reverse = bool(int(reverse))
position = tokens.get('p', [None])[0]
except (TypeError, ValueError):
raise NotFound(self.invalid_cursor_message)
return Cursor(offset=offset, reverse=reverse, position=position)
def encode_cursor(self, cursor):
"""
Given a Cursor instance, return an url with encoded cursor.
"""
tokens = {}
if cursor.offset != 0:
tokens['o'] = str(cursor.offset)
if cursor.reverse:
tokens['r'] = '1'
if cursor.position is not None:
tokens['p'] = cursor.position
querystring = urlparse.urlencode(tokens, doseq=True)
encoded = b64encode(querystring.encode('ascii')).decode('ascii')
return replace_query_param(self.base_url, self.cursor_query_param, encoded)
def _get_position_from_instance(self, instance, ordering):
field_name = ordering[0].lstrip('-')
if isinstance(instance, dict):
attr = instance[field_name]
else:
attr = getattr(instance, field_name)
return six.text_type(attr)
def get_paginated_response(self, data):
return Response(OrderedDict([
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_html_context(self):
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link()
}
def to_html(self):
template = loader.get_template(self.template)
context = self.get_html_context()
return template.render(context)
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
fields = [
coreapi.Field(
name=self.cursor_query_param,
required=False,
location='query',
schema=coreschema.String(
title='Cursor',
description=force_text(self.cursor_query_description)
)
)
]
if self.page_size_query_param is not None:
fields.append(
coreapi.Field(
name=self.page_size_query_param,
required=False,
location='query',
schema=coreschema.Integer(
title='Page size',
description=force_text(self.page_size_query_description)
)
)
)
return fields
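# Added sketch (not upstream code): cursor pagination needs an unchanging,
# (nearly) unique ordering, so a typical subclass pins both page size and
# ordering. The field name 'created' is an assumption about the model.
#
#     class CreatedCursorPagination(CursorPagination):
#         page_size = 50
#         ordering = '-created'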
| gpl-3.0 |
ikben/troposphere | tests/test_rds.py | 2 | 11005 | import unittest
import troposphere.rds as rds
from troposphere import If, Parameter, Ref
AWS_NO_VALUE = "AWS::NoValue"
class TestRDS(unittest.TestCase):
def test_it_allows_an_rds_instance_created_from_a_snapshot(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=100,
DBInstanceClass='db.m1.small',
Engine='MySQL',
DBSnapshotIdentifier='SomeSnapshotIdentifier'
)
rds_instance.to_dict()
def test_it_allows_an_rds_instance_with_master_username_and_password(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword'
)
rds_instance.to_dict()
def test_it_rds_instances_require_either_a_snapshot_or_credentials(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL'
)
with self.assertRaisesRegexp(
ValueError,
r'Either \(MasterUsername and MasterUserPassword\) or'
r' DBSnapshotIdentifier are required'
):
rds_instance.to_dict()
def test_it_allows_an_rds_replica(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier'
)
rds_instance.to_dict()
def test_replica_settings_are_inherited(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier',
BackupRetentionPeriod="1",
DBName="SomeName",
MasterUsername="SomeUsername",
MasterUserPassword="SomePassword",
PreferredBackupWindow="10:00-11:00",
MultiAZ=True,
DBSnapshotIdentifier="SomeDBSnapshotIdentifier",
)
with self.assertRaisesRegexp(
ValueError,
'BackupRetentionPeriod, DBName, DBSnapshotIdentifier, '
'MasterUserPassword, MasterUsername, '
'MultiAZ, PreferredBackupWindow '
'properties can\'t be provided when '
'SourceDBInstanceIdentifier is present '
'AWS::RDS::DBInstance.'
):
rds_instance.to_dict()
def test_it_rds_instances_require_encryption_if_kms_key_provided(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword',
KmsKeyId='arn:aws:kms:us-east-1:123456789012:key/'
'12345678-1234-1234-1234-123456789012'
)
with self.assertRaisesRegexp(
ValueError,
'If KmsKeyId is provided, StorageEncrypted is required'
):
rds_instance.to_dict()
def test_it_allows_an_rds_instance_with_iops(self):
# ensure troposphere works with longs and ints
try:
long_number = long(2000)
except NameError:
# Python 3 doesn't have 'long' anymore
long_number = 2000
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=200,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword',
StorageType='io1',
Iops=long_number,
)
rds_instance.to_dict()
def test_optiongroup(self):
rds_optiongroup = rds.OptionGroup(
"OracleOptionGroup",
EngineName="oracle-ee",
MajorEngineVersion="12.1",
OptionGroupDescription="A test option group",
OptionConfigurations=[
rds.OptionConfiguration(
DBSecurityGroupMemberships=["default"],
OptionName="OEM",
Port="5500",
),
rds.OptionConfiguration(
OptionName="APEX",
),
]
)
rds_optiongroup.to_dict()
def test_fail_az_and_multiaz(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
AvailabilityZone="us-east-1",
MultiAZ=True)
with self.assertRaisesRegexp(ValueError, "if MultiAZ is set to "):
i.to_dict()
i.MultiAZ = "false"
i.to_dict()
i.MultiAZ = "true"
with self.assertRaisesRegexp(ValueError, "if MultiAZ is set to "):
i.to_dict()
i.MultiAZ = Ref(AWS_NO_VALUE)
i.to_dict()
def test_az_and_multiaz_funcs(self):
db_az = "us-east-1"
db_multi_az = Parameter("dbmultiaz", Type="String")
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
AvailabilityZone=If("db_az", Ref(db_az), Ref(AWS_NO_VALUE)),
MultiAZ=Ref(db_multi_az),
)
i.validate()
def test_io1_storage_type_and_iops(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
StorageType='io1')
with self.assertRaisesRegexp(ValueError, "Must specify Iops if "):
i.to_dict()
def test_storage_to_iops_ratio(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
DBInstanceClass="db.m1.small",
Engine="postgres",
StorageType='io1',
Iops=6000,
AllocatedStorage=10)
with self.assertRaisesRegexp(ValueError, " must be at least 100 "):
i.to_dict()
i.AllocatedStorage = 100
with self.assertRaisesRegexp(
ValueError, " must be no less than 1/50th "):
i.to_dict()
i.Iops = 5000
i.to_dict()
def test_snapshot(self):
i = rds.DBInstance(
'MyDB',
DBName='test',
AllocatedStorage=25,
DBInstanceClass='db.m4.large',
DBSubnetGroupName='default',
DBSnapshotIdentifier='id',
)
i.to_dict()
def test_snapshot_and_engine(self):
i = rds.DBInstance(
'MyDB',
DBName='test',
AllocatedStorage=25,
DBInstanceClass='db.m4.large',
DBSubnetGroupName='default',
DBSnapshotIdentifier='id',
Engine="postgres",
)
i.to_dict()
def test_no_snapshot_or_engine(self):
i = rds.DBInstance(
'MyDB',
DBName='test',
AllocatedStorage=25,
DBInstanceClass='db.m4.large',
DBSubnetGroupName='default',
)
with self.assertRaisesRegexp(
ValueError, "Resource Engine is required"):
i.to_dict()
class TestRDSValidators(unittest.TestCase):
def test_validate_iops(self):
with self.assertRaises(ValueError):
rds.validate_iops(500)
rds.validate_iops(2000)
rds.validate_iops(0)
def test_validate_storage_type(self):
for t in rds.VALID_STORAGE_TYPES:
rds.validate_storage_type(t)
with self.assertRaises(ValueError):
rds.validate_storage_type("bad_storage_type")
def test_validate_engine(self):
for e in rds.VALID_DB_ENGINES:
rds.validate_engine(e)
with self.assertRaises(ValueError):
rds.validate_engine("bad_engine")
def test_validate_engine_mode(self):
for e in rds.VALID_DB_ENGINE_MODES:
rds.validate_engine_mode(e)
with self.assertRaises(ValueError):
rds.validate_engine_mode("bad_engine")
def test_validate_license_model(self):
for lm in rds.VALID_LICENSE_MODELS:
rds.validate_license_model(lm)
with self.assertRaises(ValueError):
rds.validate_license_model("bad_license_model")
def test_validate_backup_window(self):
good_windows = ("10:00-11:00", "22:00-06:00")
for w in good_windows:
rds.validate_backup_window(w)
bad_format = ("bad_backup_window", "28:11-10:00", "10:00-28:11")
for w in bad_format:
with self.assertRaisesRegexp(ValueError, "must be in the format"):
rds.validate_backup_window(w)
with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
rds.validate_backup_window("10:00-10:10")
def test_validate_maintenance_window(self):
good_windows = ("Mon:10:00-Mon:16:30", "Mon:10:00-Wed:10:00",
"Sun:16:00-Mon:11:00")
for w in good_windows:
rds.validate_maintenance_window(w)
bad_format = ("bad_mainteance", "Mon:10:00-Tue:28:00", "10:00-22:00")
for w in bad_format:
with self.assertRaisesRegexp(ValueError, "must be in the format"):
rds.validate_maintenance_window(w)
bad_days = ("Boo:10:00-Woo:10:30", "Boo:10:00-Tue:10:30",
"Mon:10:00-Boo:10:30")
for w in bad_days:
with self.assertRaisesRegexp(ValueError, " day part of ranges "):
rds.validate_maintenance_window(w)
with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
rds.validate_maintenance_window("Mon:10:00-Mon:10:10")
def test_validate_backup_retention_period(self):
for d in (1, 10, 15, 35):
rds.validate_backup_retention_period(d)
with self.assertRaisesRegexp(ValueError, " cannot be larger than 35 "):
rds.validate_backup_retention_period(40)
rds.validate_backup_retention_period(10)
def test_validate_capacity(self):
for e in rds.VALID_SCALING_CONFIGURATION_CAPACITIES:
rds.validate_capacity(e)
with self.assertRaises(ValueError):
rds.validate_capacity(3)
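# Added sketch (not part of the test module; resource names are made up): the
# happy path these tests exercise corresponds to template code such as
#
#     from troposphere import Template
#     import troposphere.rds as rds
#
#     t = Template()
#     t.add_resource(rds.DBInstance(
#         "MyDB",
#         AllocatedStorage=25,
#         DBInstanceClass="db.m4.large",
#         Engine="postgres",
#         MasterUsername="admin",
#         MasterUserPassword="secret",
#     ))
#     print(t.to_json())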
| bsd-2-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py | 354 | 5544 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshi", "etree_lxml"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
Args:
treeType (str): the name of the tree type required (case-insensitive).
Supported values are:
- "dom": The xml.dom.minidom DOM implementation
- "etree": A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
- "lxml": Optimized walker for lxml.etree
- "genshi": a Genshi stream
Implementation: A module implementing the tree type e.g.
xml.etree.ElementTree or cElementTree (Currently applies to the
"etree" tree type only).
"""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType == "dom":
from . import dom
treeWalkerCache[treeType] = dom.TreeWalker
elif treeType == "genshi":
from . import genshi
treeWalkerCache[treeType] = genshi.TreeWalker
elif treeType == "lxml":
from . import etree_lxml
treeWalkerCache[treeType] = etree_lxml.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
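# Added usage sketch (not upstream code): parse markup with html5lib's default
# etree treebuilder, then walk it and pretty-print the token stream. The
# markup string is an arbitrary example.
#
#     import html5lib
#     from html5lib import treewalkers
#
#     doc = html5lib.parse("<p>Hello <b>world</b></p>")
#     TreeWalker = treewalkers.getTreeWalker("etree")
#     print(treewalkers.pprint(TreeWalker(doc)))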
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
| mit |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.6/site-packages/pip/_vendor/html5lib/treewalkers/base.py | 355 | 4939 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
data = data
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
| mit |
yulang/linux-2.6.39.2_tcp_lab | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
amygdala/tensorflow-workshop | workshop_sections/getting_started/starter_tf_graph/tf_matrix_mul.py | 1 | 1548 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
m1 = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]], dtype=np.float32)
# Input data.
m1_input = tf.placeholder(tf.float32, shape=[4, 2])
m2 = tf.Variable(tf.random_uniform([2, 3], -1.0, 1.0))
m3 = tf.matmul(m1_input, m2)
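# Note: m1_input is a 4x2 placeholder and m2 is a 2x3 variable, so m3 has shape [4, 3].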
# This is an identity op with the side effect of printing data when
# evaluating.
m3 = tf.Print(m3, [m3], message="m3 is: ")
# Add variable initializer.
init = tf.global_variables_initializer() # initialize_all_variables() prior to TF0.12
with tf.Session() as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
print("m2: {}".format(m2))
print("eval m2: {}".format(m2.eval()))
feed_dict = {m1_input: m1}
result = session.run([m3], feed_dict=feed_dict)
print("\nresult: {}\n".format(result))
| apache-2.0 |
vj-ug/gcloud-python | gcloud/credentials.py | 7 | 14075 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple wrapper around the OAuth2 credentials library."""
import base64
import datetime
import six
from six.moves.urllib.parse import urlencode # pylint: disable=F0401
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from oauth2client import client
from oauth2client.client import _get_application_default_credential_from_file
from oauth2client import crypt
from oauth2client import service_account
try:
from google.appengine.api import app_identity
except ImportError:
app_identity = None
try:
from oauth2client.appengine import AppAssertionCredentials as _GAECreds
except ImportError:
class _GAECreds(object):
"""Dummy class if not in App Engine environment."""
from gcloud._helpers import UTC
from gcloud._helpers import _NOW
from gcloud._helpers import _microseconds_from_datetime
def get_credentials():
"""Gets credentials implicitly from the current environment.
.. note::
You should not need to use this function directly. Instead, use the
helper method :func:`gcloud.datastore.__init__.get_connection`
which uses this method under the hood.
Checks environment in order of precedence:
* Google App Engine (production and testing)
* Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
a file with stored credentials information.
* Stored "well known" file associated with ``gcloud`` command line tool.
* Google Compute Engine production environment.
The file referred to in GOOGLE_APPLICATION_CREDENTIALS is expected to
contain information about credentials that are ready to use. This means
either service account information or user account information with
a ready-to-use refresh token::
    {                                       {
        'type': 'authorized_user',              'type': 'service_account',
        'client_id': '...',                     'client_id': '...',
        'client_secret': '...',       OR        'client_email': '...',
        'refresh_token': '...',                 'private_key_id': '...',
    }                                           'private_key': '...',
                                            }
The second of these is simply a JSON key downloaded from the Google APIs
console. The first is a close cousin of the "client secrets" JSON file
used by ``oauth2client.clientsecrets`` but differs in formatting.
:rtype: :class:`oauth2client.client.GoogleCredentials`,
:class:`oauth2client.appengine.AppAssertionCredentials`,
:class:`oauth2client.gce.AppAssertionCredentials`,
:class:`oauth2client.service_account._ServiceAccountCredentials`
:returns: A new credentials instance corresponding to the implicit
environment.
"""
return client.GoogleCredentials.get_application_default()
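# Added sketch (not upstream code): the typical implicit flow outside App
# Engine / Compute Engine, assuming GOOGLE_APPLICATION_CREDENTIALS points at a
# downloaded JSON key (the path and scope below are made-up examples).
#
#     import os
#     os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/key.json'
#     credentials = get_credentials()
#     scoped = credentials.create_scoped(
#         ['https://www.googleapis.com/auth/devstorage.read_only'])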
def get_for_service_account_json(json_credentials_path, scope=None):
"""Gets the credentials for a service account with JSON key.
:type json_credentials_path: string
:param json_credentials_path: The path to a private key file (this file was
given to you when you created the service
account). This file must contain a JSON
object with a private key and other
credentials information (downloaded from the
Google APIs console).
:type scope: string or tuple of string
:param scope: The scope against which to authenticate. (Different services
require different scopes, check the documentation for which
scope is required for the different levels of access to any
particular API.)
:rtype: :class:`oauth2client.client.GoogleCredentials`,
:class:`oauth2client.service_account._ServiceAccountCredentials`
:returns: New service account or Google (for a user JSON key file)
credentials object.
"""
credentials = _get_application_default_credential_from_file(
json_credentials_path)
if scope is not None:
credentials = credentials.create_scoped(scope)
return credentials
def get_for_service_account_p12(client_email, private_key_path, scope=None):
"""Gets the credentials for a service account with PKCS12 / p12 key.
.. note::
This method is not used by default, instead :func:`get_credentials`
is used. This method is intended to be used when the environments is
known explicitly and detecting the environment implicitly would be
superfluous.
:type client_email: string
:param client_email: The e-mail attached to the service account.
:type private_key_path: string
:param private_key_path: The path to a private key file (this file was
given to you when you created the service
account). This file must be in P12 format.
:type scope: string or tuple of string
:param scope: The scope against which to authenticate. (Different services
require different scopes, check the documentation for which
scope is required for the different levels of access to any
particular API.)
:rtype: :class:`oauth2client.client.SignedJwtAssertionCredentials`
:returns: A new ``SignedJwtAssertionCredentials`` instance with the
needed service account settings.
"""
return client.SignedJwtAssertionCredentials(
service_account_name=client_email,
private_key=open(private_key_path, 'rb').read(),
scope=scope)
def _get_pem_key(credentials):
"""Gets RSA key for a PEM payload from a credentials object.
:type credentials: :class:`client.SignedJwtAssertionCredentials`,
:class:`service_account._ServiceAccountCredentials`
:param credentials: The credentials used to create an RSA key
for signing text.
:rtype: :class:`Crypto.PublicKey.RSA._RSAobj`
:returns: An RSA object used to sign text.
:raises: `TypeError` if `credentials` is the wrong type.
"""
if isinstance(credentials, client.SignedJwtAssertionCredentials):
# Take our PKCS12 (.p12) key and make it into a RSA key we can use.
pem_text = crypt.pkcs12_key_as_pem(credentials.private_key,
credentials.private_key_password)
elif isinstance(credentials, service_account._ServiceAccountCredentials):
pem_text = credentials._private_key_pkcs8_text
else:
raise TypeError((credentials,
'not a valid service account credentials type'))
return RSA.importKey(pem_text)
def _get_signature_bytes(credentials, string_to_sign):
"""Uses crypto attributes of credentials to sign a string/bytes.
:type credentials: :class:`client.SignedJwtAssertionCredentials`,
:class:`service_account._ServiceAccountCredentials`,
:class:`_GAECreds`
:param credentials: The credentials used for signing text (typically
involves the creation of an RSA key).
:type string_to_sign: string
:param string_to_sign: The string to be signed by the credentials.
:rtype: bytes
:returns: Signed bytes produced by the credentials.
"""
if isinstance(credentials, _GAECreds):
_, signed_bytes = app_identity.sign_blob(string_to_sign)
return signed_bytes
else:
pem_key = _get_pem_key(credentials)
# Sign the string with the RSA key.
signer = PKCS1_v1_5.new(pem_key)
if not isinstance(string_to_sign, six.binary_type):
string_to_sign = string_to_sign.encode('utf-8')
signature_hash = SHA256.new(string_to_sign)
return signer.sign(signature_hash)
def _get_service_account_name(credentials):
"""Determines service account name from a credentials object.
:type credentials: :class:`client.SignedJwtAssertionCredentials`,
:class:`service_account._ServiceAccountCredentials`,
:class:`_GAECreds`
:param credentials: The credentials used to determine the service
account name.
:rtype: string
:returns: Service account name associated with the credentials.
:raises: :class:`ValueError` if the credentials are not a valid service
account type.
"""
service_account_name = None
if isinstance(credentials, client.SignedJwtAssertionCredentials):
service_account_name = credentials.service_account_name
elif isinstance(credentials, service_account._ServiceAccountCredentials):
service_account_name = credentials._service_account_email
elif isinstance(credentials, _GAECreds):
service_account_name = app_identity.get_service_account_name()
if service_account_name is None:
raise ValueError('Service account name could not be determined '
'from credentials')
return service_account_name
def _get_signed_query_params(credentials, expiration, string_to_sign):
"""Gets query parameters for creating a signed URL.
:type credentials: :class:`client.SignedJwtAssertionCredentials`,
:class:`service_account._ServiceAccountCredentials`
:param credentials: The credentials used to create an RSA key
for signing text.
:type expiration: int or long
:param expiration: When the signed URL should expire.
:type string_to_sign: string
:param string_to_sign: The string to be signed by the credentials.
:rtype: dict
:returns: Query parameters matching the signing credentials with a
signed payload.
"""
signature_bytes = _get_signature_bytes(credentials, string_to_sign)
signature = base64.b64encode(signature_bytes)
service_account_name = _get_service_account_name(credentials)
return {
'GoogleAccessId': service_account_name,
'Expires': str(expiration),
'Signature': signature,
}
def _get_expiration_seconds(expiration):
"""Convert 'expiration' to a number of seconds in the future.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:rtype: int
:returns: a timestamp as an absolute number of seconds.
"""
# If it's a timedelta, add it to `now` in UTC.
if isinstance(expiration, datetime.timedelta):
now = _NOW().replace(tzinfo=UTC)
expiration = now + expiration
# If it's a datetime, convert to a timestamp.
if isinstance(expiration, datetime.datetime):
micros = _microseconds_from_datetime(expiration)
expiration = micros // 10**6
if not isinstance(expiration, six.integer_types):
raise TypeError('Expected an integer timestamp, datetime, or '
'timedelta. Got %s' % type(expiration))
return expiration
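# A quick sketch of the conversions above (values are illustrative only):
#
#     _get_expiration_seconds(1437767600)                    # returned as-is
#     _get_expiration_seconds(datetime.timedelta(hours=1))   # now (UTC) + 1 hour
#     _get_expiration_seconds(datetime.datetime(2015, 7, 24, tzinfo=UTC))
#                                                            # absolute POSIX seconds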
def generate_signed_url(credentials, resource, expiration,
api_access_endpoint='',
method='GET', content_md5=None,
content_type=None):
"""Generate signed URL to provide query-string auth'n to a resource.
.. note::
If you are on Google Compute Engine, you can't generate a signed URL.
Follow https://github.com/GoogleCloudPlatform/gcloud-python/issues/922
for updates on this. If you'd like to be able to generate a signed URL
from GCE, you can use a standard service account from a JSON file
rather than a GCE service account.
:type credentials: :class:`oauth2client.appengine.AppAssertionCredentials`
:param credentials: Credentials object with an associated private key to
sign text.
:type resource: string
:param resource: A pointer to a specific resource
(typically, ``/bucket-name/path/to/blob.txt``).
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:type api_access_endpoint: string
:param api_access_endpoint: Optional URI base. Defaults to empty string.
:type method: string
:param method: The HTTP verb that will be used when requesting the URL.
:type content_md5: string
:param content_md5: The MD5 hash of the object referenced by
``resource``.
:type content_type: string
:param content_type: The content type of the object referenced by
``resource``.
:rtype: string
:returns: A signed URL you can use to access the resource
until expiration.
"""
expiration = _get_expiration_seconds(expiration)
# Generate the string to sign.
string_to_sign = '\n'.join([
method,
content_md5 or '',
content_type or '',
str(expiration),
resource])
# Set the right query parameters.
query_params = _get_signed_query_params(credentials,
expiration,
string_to_sign)
# Return the built URL.
return '{endpoint}{resource}?{querystring}'.format(
endpoint=api_access_endpoint, resource=resource,
querystring=urlencode(query_params))
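# A minimal usage sketch; the bucket/blob path and endpoint below are
# placeholders, not values required by this module:
#
#     url = generate_signed_url(
#         credentials, '/bucket-name/path/to/blob.txt',
#         expiration=datetime.timedelta(hours=1),
#         api_access_endpoint='https://storage.googleapis.com')
#
# The returned URL can be fetched by any HTTP client without further
# authentication until the expiration passes.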
| apache-2.0 |
miguelparaiso/OdooAccessible | addons/hr_attendance/wizard/__init__.py | 375 | 1073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance_error
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
maxwellalive/YCDIVFX_MaxPlus | packages/maxconnect/tomax.py | 2 | 7941 | '''
Taken from Sublime3dsmax : http://cbuelter.de/?p=535
'''
############################################################################
#
# This module finds 3ds Max and the MAXScript Listener and can
# send strings and button strokes to it.
#
# Completely based on m2u: http://alfastuff.wordpress.com/2013/10/13/m2u/
# and figured out by the amazing Johannes: http://alfastuff.wordpress.com/
#
# Known issues: EnumPos for childwindows changes,
# e.g. if using create mode or hierarchy mode.
# Current workaround is to use the first handle
# that matches cls="MXS_Scintilla", which is the
# mini macro recorder, to paste text into.
#
############################################################################
# keeps all the required UI elements of the Max and talks to them
import ctypes #required for windows ui stuff
import threading
MAX_TITLE_IDENTIFIER = r"Autodesk 3ds Max"
# UI element window handles
gMaxThreadProcessID = None
gMainWindow = None
gMiniMacroRecorder = None
# windows functions and constants
# stuff for finding and analyzing UI Elements
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
EnumChildWindows = ctypes.windll.user32.EnumChildWindows
FindWindowEx = ctypes.windll.user32.FindWindowExW
GetClassName = ctypes.windll.user32.GetClassNameW
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
GetWindow = ctypes.windll.user32.GetWindow
GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
PostMessage = ctypes.windll.user32.PostMessageA
SendMessage = ctypes.windll.user32.SendMessageA
WM_SETTEXT = 0x000C
WM_KEYDOWN = 0x0100
WM_KEYUP = 0x0101
WM_CHAR = 0x0102 # the alternative to WM_KEYDOWN
VK_RETURN = 0x0D # Enter key
# attaching is required for SendMessage and the like to actually work like it should
AttachThreadInput = ctypes.windll.user32.AttachThreadInput
class ThreadWinLParm(ctypes.Structure):
"""lParam object to get a name to and an object back from a windows
enumerator function.
.. seealso:: :func:`_getChildWindowByName`
"""
_fields_=[
("name", ctypes.c_wchar_p),
("cls", ctypes.c_wchar_p),
("hwnd", ctypes.POINTER(ctypes.c_long)),
("enumPos", ctypes.c_int),
("_enum", ctypes.c_int) # keep track of current enum step
]
def _getChildWindowByName(hwnd, lParam):
"""callback function to be called by EnumChildWindows, see
:func:`getChildWindowByName`
:param hwnd: the window handle
:param lParam: a :ref:`ctypes.byref` instance of :class:`ThreadWinLParam`
if name is None, the cls name is taken,
if cls is None, the name is taken,
if both are None, all elements are printed
if both have values, only the element matching both will fit
"""
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
param = ctypes.cast(lParam, ctypes.POINTER(ThreadWinLParm)).contents
param._enum += 1
length = 255
cbuff = ctypes.create_unicode_buffer(length + 1)
GetClassName(hwnd, cbuff, length+1)
if param.name == None and param.cls != None:
if param.cls in cbuff.value:# == param.cls:
param.hwnd = hwnd
return False
elif param.cls == None and param.name != None:
if buff.value == param.name:
param.hwnd = hwnd
return False
elif param.cls != None and param.name != None:
if buff.value == param.name and param.cls in cbuff.value:# == param.cls:
param.hwnd = hwnd
return False
else: #both values are None, print the current element
print ("wnd cls: "+cbuff.value+" name: "+buff.value+" enum: "+str(param._enum))
return True
def getChildWindowByName(hwnd, name = None, cls = None):
"""find a window by its name or clsName, returns the window's hwnd
:param hwnd: the parent window's hwnd
:param name: the name/title to search for
:param cls: the clsName to search for
:return: the hwnd of the matching child window
if name is None, the cls name is taken,
if cls is None, the name is taken,
if both are None, all elements are printed
if both have values, only the element matching both will fit.
.. seealso:: :func:`_getChildWindowByName`, :func:`getChildWindowByEnumPos`
"""
param = ThreadWinLParm(name=name,cls=cls,_enum=-1)
lParam = ctypes.byref(param)
EnumChildWindows(hwnd, EnumWindowsProc(_getChildWindowByName),lParam)
return param.hwnd
def getMXSMiniMacroRecorder():
"""convenience function
"""
# The function will return the first param that matches the class name.
# Thankfully, this is the MAXScript Mini Listener.
global gMainWindow
miniMacroRecorderHandle = getChildWindowByName(gMainWindow, name=None, cls="MXS_Scintilla")
return miniMacroRecorderHandle
def _getChildWindowByEnumPos(hwnd, lParam):
""" callback function, see :func:`getChildWindowByEnumPos` """
param = ctypes.cast(lParam, ctypes.POINTER(ThreadWinLParm)).contents
param._enum += 1
if param._enum == param.enumPos:
param.hwnd = hwnd
return False
return True
def getChildWindowByEnumPos(hwnd, pos):
"""get a child window by its enum pos, return its hwnd
:param hwnd: the parent window's hwnd
:param pos: the number to search for
:return: the hwnd of the matching child window
This function uses the creation order which is reflected in Windows Enumerate
functions to get the handle to a certain window. This is useful when the
name or cls of the desired window is not unique or not given.
You can count the enum pos by printing all child windows of a window.
.. seealso:: :func:`getChildWindowByName`
"""
param = ThreadWinLParm(name = None, cls = None, enumPos = pos, _enum = -1)
EnumChildWindows( hwnd, EnumWindowsProc(_getChildWindowByEnumPos), ctypes.byref(param))
return param.hwnd
def attachThreads(hwnd):
"""tell Windows to attach the program and the max threads.
This will give us some benefits in control, for example SendMessage calls to
the max thread will only return when Max has processed the message, amazing!
"""
thread = GetWindowThreadProcessId(hwnd, 0) #max thread
global gMaxThreadProcessID
gMaxThreadProcessID = thread
thisThread = threading.current_thread().ident #program thread
AttachThreadInput(thread, thisThread, True)
def _getWindows(hwnd, lParam):
"""callback function, find the Max Window (and fill the ui element vars)
This is a callback function. Windows itself will call this function for
every top-level window in EnumWindows iterator function.
.. seealso:: :func:`connectToMax`
"""
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
global MAX_TITLE_IDENTIFIER
if MAX_TITLE_IDENTIFIER in buff.value:
global gMainWindow, gMaxThreadProcessID
gMainWindow = hwnd
attachThreads(gMainWindow)
# Find MAXScript Mini Listener
global gMiniMacroRecorder
gMiniMacroRecorder = getMXSMiniMacroRecorder()
return False
return True
def connectToMax():
global gMainWindow
EnumWindows(EnumWindowsProc(_getWindows), 0)
return (gMainWindow is not None)
def fireCommand(command):
"""Executes the command string in Max.
';' at end needed for ReturnKey to be accepted."""
global gMiniMacroRecorder
SendMessage(gMiniMacroRecorder, WM_SETTEXT, 0, str(command) )
SendMessage(gMiniMacroRecorder, WM_CHAR, VK_RETURN, 0)
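# A minimal sketch of the intended call sequence, assuming a running 3ds Max
# instance (the MAXScript snippet is only an example; note the trailing ';'):
#
#     if connectToMax():
#         fireCommand('print "hello from maxconnect";')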
| gpl-2.0 |
CyanHacker-Lollipop/kernel_htc_msm8974 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
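# Each non-comment test line holds four colon-separated fields:
# command (c = issue command, t = test status, w = wait for status),
# opcode, thread id and data. Illustrative lines (not from a real test file):
#
#   C: locknowait: 0: 0
#   W: locked: 0: 0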
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
rhjdjong/SlipLib | src/sliplib/slip.py | 2 | 8106 | # Copyright (c) 2020. Ruud de Jong
# This file is part of the SlipLib project which is released under the MIT license.
# See https://github.com/rhjdjong/SlipLib for details.
"""
Constants
---------
.. data:: END
.. data:: ESC
.. data:: ESC_END
.. data:: ESC_ESC
These constants represent the special bytes
used by SLIP for delimiting and encoding messages.
Functions
---------
The following are lower-level functions, that should normally not be used directly.
.. autofunction:: encode
.. autofunction:: decode
.. autofunction:: is_valid
Classes
-------
.. autoclass:: Driver
Class :class:`Driver` offers the following methods:
.. automethod:: send
.. automethod:: receive
To enable recovery from a :exc:`ProtocolError`, the
:class:`Driver` class offers the following attribute and method:
.. autoattribute:: messages
.. automethod:: flush
"""
import collections
import re
from typing import Deque, List, Union
END = b'\xc0'
ESC = b'\xdb'
ESC_END = b'\xdc'
ESC_ESC = b'\xdd'
"""These constants represent the special SLIP bytes"""
class ProtocolError(ValueError):
"""Exception to indicate that a SLIP protocol error has occurred.
This exception is raised when an attempt is made to decode
a packet with an invalid byte sequence.
An invalid byte sequence is either an :const:`ESC` byte followed
by any byte that is not an :const:`ESC_ESC` or :const:`ESC_END` byte,
or a trailing :const:`ESC` byte as last byte of the packet.
The :exc:`ProtocolError` carries the invalid packet
    as the first (and only) element in its :attr:`args` tuple.
"""
def encode(msg: bytes) -> bytes:
"""Encodes a message (a byte sequence) into a SLIP-encoded packet.
Args:
msg: The message that must be encoded
Returns:
The SLIP-encoded message
"""
msg = bytes(msg)
return END + msg.replace(ESC, ESC + ESC_ESC).replace(END, ESC + ESC_END) + END
def decode(packet: bytes) -> bytes:
"""Retrieves the message from the SLIP-encoded packet.
Args:
packet: The SLIP-encoded message.
Note that this must be exactly one complete packet.
The :func:`decode` function does not provide any buffering
for incomplete packages, nor does it provide support
for decoding data with multiple packets.
Returns:
The decoded message
Raises:
ProtocolError: if the packet contains an invalid byte sequence.
"""
if not is_valid(packet):
raise ProtocolError(packet)
return packet.strip(END).replace(ESC + ESC_END, END).replace(ESC + ESC_ESC, ESC)
def is_valid(packet: bytes) -> bool:
"""Indicates if the packet's contents conform to the SLIP specification.
A packet is valid if:
* It contains no :const:`END` bytes other than leading and/or trailing :const:`END` bytes, and
* Each :const:`ESC` byte is followed by either an :const:`ESC_END` or an :const:`ESC_ESC` byte.
Args:
packet: The packet to inspect.
Returns:
:const:`True` if the packet is valid, :const:`False` otherwise
"""
packet = packet.strip(END)
return not (END in packet or
packet.endswith(ESC) or
re.search(ESC + b'[^' + ESC_END + ESC_ESC + b']', packet))
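# A short round-trip sketch of the helpers above, using the constants defined
# in this module:
#
#     >>> packet = encode(b'hello' + ESC + b'world')
#     >>> is_valid(packet)
#     True
#     >>> decode(packet) == b'hello' + ESC + b'world'
#     True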
class Driver:
"""Class to handle the SLIP-encoding and decoding of messages
This class manages the handling of encoding and decoding of
messages according to the SLIP protocol.
"""
def __init__(self) -> None:
self._recv_buffer = b''
self._packets = collections.deque() # type: Deque[bytes]
self._messages = [] # type: List[bytes]
def send(self, message: bytes) -> bytes: # pylint: disable=no-self-use
"""Encodes a message into a SLIP-encoded packet.
The message can be any arbitrary byte sequence.
Args:
message: The message that must be encoded.
Returns:
A packet with the SLIP-encoded message.
"""
return encode(message)
def receive(self, data: Union[bytes, int]) -> List[bytes]:
"""Decodes data and gives a list of decoded messages.
Processes :obj:`data`, which must be a bytes-like object,
and returns a (possibly empty) list with :class:`bytes` objects,
each containing a decoded message.
Any non-terminated SLIP packets in :obj:`data`
are buffered, and processed with the next call to :meth:`receive`.
Args:
data: A bytes-like object to be processed.
An empty :obj:`data` parameter forces the internal
buffer to be flushed and decoded.
To accommodate iteration over byte sequences, an
integer in the range(0, 256) is also accepted.
Returns:
A (possibly empty) list of decoded messages.
Raises:
ProtocolError: When `data` contains an invalid byte sequence.
"""
# When a single byte is fed into this function
# it is received as an integer, not as a bytes object.
# It must first be converted into a bytes object.
if isinstance(data, int):
data = bytes((data,))
# Empty data indicates that the data reception is complete.
# To force a buffer flush, an END byte is added, so that the
# current contents of _recv_buffer will form a complete message.
if not data:
data = END
self._recv_buffer += data
# The following situations can occur:
#
# 1) _recv_buffer is empty or contains only END bytes --> no packets available
# 2) _recv_buffer contains non-END bytes --> packets are available
#
# Strip leading END bytes from _recv_buffer to avoid handling empty _packets.
self._recv_buffer = self._recv_buffer.lstrip(END)
if self._recv_buffer:
# The _recv_buffer contains non-END bytes.
# It is now split on sequences of one or more END bytes.
# The trailing element from the split operation is a possibly incomplete
# packet; this element is therefore used as the new _recv_buffer.
# If _recv_buffer contains one or more trailing END bytes,
# (meaning that there are no incomplete packets), then the last element,
# and therefore the new _recv_buffer, is an empty bytes object.
self._packets.extend(re.split(END + b'+', self._recv_buffer))
self._recv_buffer = self._packets.pop()
# Process the buffered packets
return self.flush()
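    # A short sketch of feeding fragmented data into a Driver, assuming the
    # module-level encode() above:
    #
    #     >>> drv = Driver()
    #     >>> drv.receive(encode(b'hello')[:-1])   # packet not yet terminated
    #     []
    #     >>> drv.receive(b'')                     # force a buffer flush
    #     [b'hello']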
def flush(self) -> List[bytes]:
"""Gives a list of decoded messages.
Decodes the packets in the internal buffer.
This enables the continuation of the processing
of received packets after a :exc:`ProtocolError`
has been handled.
Returns:
A (possibly empty) list of decoded messages from the buffered packets.
Raises:
ProtocolError: When any of the buffered packets contains an invalid byte sequence.
"""
messages = [] # type: List[bytes]
while self._packets:
packet = self._packets.popleft()
try:
msg = decode(packet)
except ProtocolError:
# Add any already decoded messages to the exception
self._messages = messages
raise
messages.append(msg)
return messages
@property
def messages(self) -> List[bytes]:
"""A list of decoded messages.
The read-only attribute :attr:`messages` contains
the messages that were
already decoded before a
:exc:`ProtocolError` was raised.
This enables the handler of the :exc:`ProtocolError`
exception to recover the messages up to the
point where the error occurred.
This attribute is cleared after it has been read.
"""
try:
return self._messages
finally:
self._messages = []
| mit |
TheYorickable/tf300t_jb_kernel | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
atpaino/socialsonar | twitter-1.10.0/build/lib/twitter/timezones.py | 26 | 1766 | # Retrieved from http://docs.python.org/2/library/datetime.html on 2013-05-24
from datetime import tzinfo, timedelta, datetime
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class.
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
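# A small usage sketch of the tzinfo classes above (results described, not
# asserted):
#
#     from datetime import datetime
#     datetime(2013, 5, 24, 12, 0, tzinfo=utc).isoformat()
#     # -> '2013-05-24T12:00:00+00:00'
#     aware = datetime(2013, 5, 24, 12, 0, tzinfo=FixedOffset(-300, 'EST'))
#     aware.utcoffset()   # -> a timedelta of -5 hours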
| mit |
rat4/blackcoin | contrib/linearize/linearize-hashes.py | 105 | 2762 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 15715
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
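# The CONFIG-FILE is a plain key=value file; a minimal example matching the
# defaults above (credentials are placeholders):
#
#   host=127.0.0.1
#   port=15715
#   rpcuser=<rpc username>
#   rpcpassword=<rpc password>
#   min_height=0
#   max_height=319000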
| mit |
SEL-Columbia/bamboo | fabfile.py | 2 | 2204 | import os
import sys
from fabric.api import env, run, cd
DEPLOYMENTS = {
'prod': {
'home': '/var/www/',
'host_string': '[email protected]',
'virtual_env': 'bamboo',
'repo_name': 'current',
'project': 'bamboo',
'docs': 'docs',
'branch': 'master',
'key_filename': os.path.expanduser('~/.ssh/modilabs.pem'),
'init_script': 'bamboo_uwsgi.sh',
'celeryd': 'celeryd',
}
}
def _run_in_virtualenv(command):
run('source ~/.virtualenvs/%s/bin/activate && %s' % (env.virtual_env,
command))
def _check_key_filename(deployment_name):
if 'key_filename' in DEPLOYMENTS[deployment_name] and \
not os.path.exists(DEPLOYMENTS[deployment_name]['key_filename']):
print 'Cannot find required permissions file: %s' % \
DEPLOYMENTS[deployment_name]['key_filename']
return False
return True
def _setup_env(deployment_name):
env.update(DEPLOYMENTS[deployment_name])
if not _check_key_filename(deployment_name):
sys.exit(1)
env.project_directory = os.path.join(env.home, env.project)
env.code_src = os.path.join(env.project_directory, env.repo_name)
env.doc_src = os.path.join(env.project_directory, env.repo_name, env.docs)
env.pip_requirements_file = os.path.join(
env.code_src, 'deploy/requirements/requirements.pip')
def deploy(deployment_name):
_setup_env(deployment_name)
# update code
with cd(env.code_src):
run('git fetch origin %(branch)s' % env)
run('git reset --hard origin/%(branch)s' % env)
run('git pull origin %(branch)s' % env)
run('find . -name "*.pyc" -delete')
# update docs
with cd(env.doc_src):
_run_in_virtualenv('make html')
# install dependencies
_run_in_virtualenv('pip install -r %s' % env.pip_requirements_file)
# restart celery
with cd(env.code_src):
_run_in_virtualenv('../shared/%s restart' % env.celeryd)
# restart the server
with cd(env.code_src):
_run_in_virtualenv('./scripts/%s restart' % env.init_script)
| bsd-3-clause |
s-hertel/ansible | test/units/modules/conftest.py | 35 | 1278 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
@pytest.fixture
def patch_ansible_module(request, mocker):
if isinstance(request.param, string_types):
args = request.param
elif isinstance(request.param, MutableMapping):
if 'ANSIBLE_MODULE_ARGS' not in request.param:
request.param = {'ANSIBLE_MODULE_ARGS': request.param}
if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
args = json.dumps(request.param)
else:
raise Exception('Malformed data to the patch_ansible_module pytest fixture')
mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
| gpl-3.0 |
jazkarta/edx-platform | common/lib/xmodule/xmodule/word_cloud_module.py | 104 | 7981 | """Word cloud is an ungraded XBlock used by students to
generate and view a word cloud.
On the client side we show:
If the student has not yet answered - `num_inputs` text inputs.
If the student has answered - the words they entered and the cloud.
"""
import json
import logging
from pkg_resources import resource_string
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.x_module import XModule
from xblock.fields import Scope, Dict, Boolean, List, Integer, String
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
def pretty_bool(value):
"""Check value for possible `True` value.
Using this function we can manage different type of Boolean value
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
class WordCloudFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default="Word cloud"
)
num_inputs = Integer(
display_name=_("Inputs"),
help=_("Number of text boxes available for students to input words/sentences."),
scope=Scope.settings,
default=5,
values={"min": 1}
)
num_top_words = Integer(
display_name=_("Maximum Words"),
help=_("Maximum number of words to be displayed in generated word cloud."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
display_student_percents = Boolean(
display_name=_("Show Percents"),
help=_("Statistics are shown for entered words near that word."),
scope=Scope.settings,
default=True
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this student has posted words to the cloud."),
scope=Scope.user_state,
default=False
)
student_words = List(
help=_("Student answer."),
scope=Scope.user_state,
default=[]
)
all_words = Dict(
help=_("All possible words from all students."),
scope=Scope.user_state_summary
)
top_words = Dict(
help=_("Top num_top_words words for word cloud."),
scope=Scope.user_state_summary
)
class WordCloudModule(WordCloudFields, XModule):
"""WordCloud Xmodule"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [
resource_string(__name__, 'js/src/word_cloud/d3.min.js'),
resource_string(__name__, 'js/src/word_cloud/d3.layout.cloud.js'),
resource_string(__name__, 'js/src/word_cloud/word_cloud.js'),
resource_string(__name__, 'js/src/word_cloud/word_cloud_main.js'),
],
}
css = {'scss': [resource_string(__name__, 'css/word_cloud/display.scss')]}
js_module_name = "WordCloud"
def get_state(self):
"""Return success json answer for client."""
if self.submitted:
total_count = sum(self.all_words.itervalues())
return json.dumps({
'status': 'success',
'submitted': True,
'display_student_percents': pretty_bool(
self.display_student_percents
),
'student_words': {
word: self.all_words[word] for word in self.student_words
},
'total_count': total_count,
'top_words': self.prepare_words(self.top_words, total_count)
})
else:
return json.dumps({
'status': 'success',
'submitted': False,
'display_student_percents': False,
'student_words': {},
'total_count': 0,
'top_words': {}
})
def good_word(self, word):
"""Convert raw word to suitable word."""
return word.strip().lower()
def prepare_words(self, top_words, total_count):
"""Convert words dictionary for client API.
:param top_words: Top words dictionary
:type top_words: dict
:param total_count: Total number of words
:type total_count: int
        :rtype: list of dicts. Every dict has 3 keys: text - the actual word,
        size - the word count, percent - the percent within the top_words dataset.
        Calculates corrected percents for every top word:
        For every word except the last, it calculates the rounded percent.
        For the last word it is 100 minus the sum of all other percents.
"""
list_to_return = []
percents = 0
for num, word_tuple in enumerate(top_words.iteritems()):
if num == len(top_words) - 1:
percent = 100 - percents
else:
percent = round(100.0 * word_tuple[1] / total_count)
percents += percent
list_to_return.append(
{
'text': word_tuple[0],
'size': word_tuple[1],
'percent': percent
}
)
return list_to_return
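    # A small numeric sketch of the correction above: for
    # top_words = {'a': 1, 'b': 1, 'c': 1} and total_count = 3, the first two
    # entries get round(100 * 1 / 3) = 33 and the last gets 100 - 66 = 34, so
    # the displayed percents always sum to exactly 100.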
def top_dict(self, dict_obj, amount):
"""Return top words from all words, filtered by number of
occurences
:param dict_obj: all words
:type dict_obj: dict
:param amount: number of words to be in top dict
:type amount: int
:rtype: dict
"""
return dict(
sorted(
dict_obj.items(),
key=lambda x: x[1],
reverse=True
)[:amount]
)
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
# Student words from client.
# FIXME: we must use raw JSON, not a post data (multipart/form-data)
raw_student_words = data.getall('student_words[]')
student_words = filter(None, map(self.good_word, raw_student_words))
self.student_words = student_words
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
# speed issues
temp_all_words = self.all_words
self.submitted = True
# Save in all_words.
for word in self.student_words:
temp_all_words[word] = temp_all_words.get(word, 0) + 1
# Update top_words.
self.top_words = self.top_dict(
temp_all_words,
self.num_top_words
)
# Save all_words in database.
self.all_words = temp_all_words
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def get_html(self):
"""Template rendering."""
context = {
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'num_inputs': self.num_inputs,
'submitted': self.submitted
}
self.content = self.system.render_template('word_cloud.html', context)
return self.content
class WordCloudDescriptor(WordCloudFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""Descriptor for WordCloud Xmodule."""
module_class = WordCloudModule
template_dir_name = 'word_cloud'
| agpl-3.0 |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_tools/test_i18n.py | 5 | 2590 | """Tests to cover the Tools/i18n package"""
import os
import unittest
from test.support.script_helper import assert_python_ok
from test.test_tools import skip_if_missing, toolsdir
from test.support import temp_cwd
skip_if_missing()
class Test_pygettext(unittest.TestCase):
"""Tests for the pygettext.py tool"""
script = os.path.join(toolsdir,'i18n', 'pygettext.py')
def get_header(self, data):
""" utility: return the header of a .po file as a dictionary """
headers = {}
for line in data.split('\n'):
if not line or line.startswith(('#', 'msgid','msgstr')):
continue
line = line.strip('"')
key, val = line.split(':',1)
headers[key] = val.strip()
return headers
def test_header(self):
"""Make sure the required fields are in the header, according to:
http://www.gnu.org/software/gettext/manual/gettext.html#Header-Entry
"""
with temp_cwd(None) as cwd:
assert_python_ok(self.script)
with open('messages.pot') as fp:
data = fp.read()
header = self.get_header(data)
self.assertIn("Project-Id-Version", header)
self.assertIn("POT-Creation-Date", header)
self.assertIn("PO-Revision-Date", header)
self.assertIn("Last-Translator", header)
self.assertIn("Language-Team", header)
self.assertIn("MIME-Version", header)
self.assertIn("Content-Type", header)
self.assertIn("Content-Transfer-Encoding", header)
self.assertIn("Generated-By", header)
# not clear if these should be required in POT (template) files
#self.assertIn("Report-Msgid-Bugs-To", header)
#self.assertIn("Language", header)
#"Plural-Forms" is optional
def test_POT_Creation_Date(self):
""" Match the date format from xgettext for POT-Creation-Date """
from datetime import datetime
with temp_cwd(None) as cwd:
assert_python_ok(self.script)
with open('messages.pot') as fp:
data = fp.read()
header = self.get_header(data)
creationDate = header['POT-Creation-Date']
# peel off the escaped newline at the end of string
if creationDate.endswith('\\n'):
creationDate = creationDate[:-len('\\n')]
# This will raise if the date format does not exactly match.
datetime.strptime(creationDate, '%Y-%m-%d %H:%M%z')
| mit |
40223139/2015cdaa5-12 | static/Brython3.1.1-20150328-091302/Lib/markdown2.py | 669 | 8143 | import browser.html
import re
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
res = '<pre class="marked">%s</pre>\n' %res
return res,[]
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
global refs
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
# lines followed by dashes
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
lines = src.split('\n')
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and (i==0 or ul or not lines[i-1].strip()):
print('is ul',lines[i])
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][1+nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol:
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
if line.strip() and line.startswith(' '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
while j<len(lines) and lines[j].strip() \
and lines[j].startswith(' '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if line.strip():
if section.line:
section.line += ' '
section.line += line
else:
sections.append(section)
section = Marked()
i += 1
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += '<p>'+mk+'\n'
scripts += _scripts
return res,scripts
def escape(czone):
czone = czone.replace('&','&')
czone = czone.replace('<','<')
czone = czone.replace('>','>')
return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
# convert _ to _ inside inline code
code_zone = code_zone.replace('_','_')
return code_zone
def s_unmark(mo):
# convert _ to _ inside inline code
code_zone = mo.string[mo.start():mo.end()]
code_zone = code_zone.replace('_','_')
return code_zone
def apply_markdown(src):
scripts = []
# replace \` by `
src = re.sub(r'\\\`','`',src)
# escape < > & in inline code
code_pattern = r'\`(\S.*?\S)\`'
src = re.sub(code_pattern,s_escape,src)
# also convert _
src = re.sub(code_pattern,s_unmark,src)
# inline links
link_pattern1 = r'\[(.+?)\]\s?\((.+?)\)'
def repl(mo):
g1,g2 = mo.groups()
g2 = re.sub('_','_',g2)
return '<a href="%s">%s</a>' %(g2,g1)
src = re.sub(link_pattern1,repl,src)
# reference links
link_pattern2 = r'\[(.+?)\]\s?\[(.*?)\]'
while True:
mo = re.search(link_pattern2,src)
if mo is None:break
text,key = mo.groups()
print(text,key)
if not key:key=text # implicit link name
if key.lower() not in refs:
            raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
repl = '<a href="'+url.href+'"'
if url.alt:
repl += ' title="'+url.alt+'"'
repl += '>%s</a>' %text
src = re.sub(link_pattern2,repl,src,count=1)
# emphasis
# replace \* by *
src = re.sub(r'\\\*','*',src)
# replace \_ by _
src = re.sub(r'\\\_','_',src)
# _ and * surrounded by spaces are not markup
src = re.sub(r' _ ',' _ ',src)
src = re.sub(r' \* ',' * ',src)
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
# replace \` by `
src = re.sub(r'\\\`','`',src)
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# ordered lists
lines = src.split('\n')
atx_header_pattern = '^(#+)(.*)(#*)'
for i,line in enumerate(lines):
print('line [%s]' %line, line.startswith('#'))
mo = re.search(atx_header_pattern,line)
if not mo:continue
print('pattern matches')
level = len(mo.groups()[0])
lines[i] = re.sub(atx_header_pattern,
'<H%s>%s</H%s>\n' %(level,mo.groups()[1],level),
line,count=1)
src = '\n'.join(lines)
src = re.sub('\n\n+','\n<p>',src)+'\n'
return src,scripts
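# A small usage sketch: mark() expects newline-terminated markdown source and
# returns the generated HTML plus any <script> bodies pulled out of it.
#
#     html, scripts = mark('# A title\n\nsome *emphasis*\n')
#     # html contains an <H1> header and an <EM> span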
| gpl-3.0 |
pschmitt/home-assistant | tests/components/abode/test_init.py | 12 | 1508 | """Tests for the Abode module."""
from homeassistant.components.abode import (
DOMAIN as ABODE_DOMAIN,
SERVICE_CAPTURE_IMAGE,
SERVICE_SETTINGS,
SERVICE_TRIGGER_AUTOMATION,
)
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM_DOMAIN
from .common import setup_platform
from tests.async_mock import patch
async def test_change_settings(hass):
"""Test change_setting service."""
await setup_platform(hass, ALARM_DOMAIN)
with patch("abodepy.Abode.set_setting") as mock_set_setting:
await hass.services.async_call(
ABODE_DOMAIN,
SERVICE_SETTINGS,
{"setting": "confirm_snd", "value": "loud"},
blocking=True,
)
await hass.async_block_till_done()
mock_set_setting.assert_called_once()
async def test_unload_entry(hass):
"""Test unloading the Abode entry."""
mock_entry = await setup_platform(hass, ALARM_DOMAIN)
with patch("abodepy.Abode.logout") as mock_logout, patch(
"abodepy.event_controller.AbodeEventController.stop"
) as mock_events_stop:
assert await hass.config_entries.async_unload(mock_entry.entry_id)
mock_logout.assert_called_once()
mock_events_stop.assert_called_once()
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_SETTINGS)
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_CAPTURE_IMAGE)
assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_TRIGGER_AUTOMATION)
| apache-2.0 |
androidarmv6/android_external_chromium_org | chrome/common/extensions/docs/server2/PRESUBMIT.py | 102 | 3631 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting extensions docs server
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
# Run build_server so that files needed by tests are copied to the local
# third_party directory.
import os
import sys
WHITELIST = [ r'.+_test.py$' ]
# The integration tests are selectively run from the PRESUBMIT in
# chrome/common/extensions.
BLACKLIST = [ r'integration_test.py$' ]
def _BuildServer(input_api):
try:
sys.path.insert(0, input_api.PresubmitLocalPath())
import build_server
build_server.main()
finally:
sys.path.pop(0)
def _ImportAppYamlHelper(input_api):
try:
sys.path.insert(0, input_api.PresubmitLocalPath())
from app_yaml_helper import AppYamlHelper
return AppYamlHelper
finally:
sys.path.pop(0)
def _WarnIfAppYamlHasntChanged(input_api, output_api):
app_yaml_path = os.path.join(input_api.PresubmitLocalPath(), 'app.yaml')
if app_yaml_path in input_api.AbsoluteLocalPaths():
return []
return [output_api.PresubmitPromptOrNotify('''
**************************************************
CHANGE DETECTED IN SERVER2 WITHOUT APP.YAML UPDATE
**************************************************
Maybe this is ok? Follow this simple guide:
Q: Does this change any data that might get stored?
* Did you add/remove/update a field to a data source?
* Did you add/remove/update some data that gets sent to templates?
* Is this change to support a new feature in the templates?
* Does this change include changes to templates?
Yes? Bump the middle version and zero out the end version, i.e. 2-5-2 -> 2-6-0.
THIS WILL CAUSE THE CURRENTLY RUNNING SERVER TO STOP UPDATING.
PUSH THE NEW VERSION ASAP.
No? Continue.
Q: Is this a non-trivial change to the server?
Yes? Bump the end version.
Unlike above, the server will *not* stop updating.
No? Are you sure? How much do you bet? This can't be rolled back...
Q: Is this a spelling correction? New test? Better comments?
Yes? Ok fine. Ignore this warning.
No? I guess this presubmit check doesn't work.
''')]
def _CheckYamlConsistency(input_api, output_api):
app_yaml_path = os.path.join(input_api.PresubmitLocalPath(), 'app.yaml')
cron_yaml_path = os.path.join(input_api.PresubmitLocalPath(), 'cron.yaml')
if not (app_yaml_path in input_api.AbsoluteLocalPaths() or
cron_yaml_path in input_api.AbsoluteLocalPaths()):
return []
AppYamlHelper = _ImportAppYamlHelper(input_api)
app_yaml_version = AppYamlHelper.ExtractVersion(
input_api.ReadFile(app_yaml_path))
cron_yaml_version = AppYamlHelper.ExtractVersion(
input_api.ReadFile(cron_yaml_path), key='target')
if app_yaml_version == cron_yaml_version:
return []
return [output_api.PresubmitError(
'Versions of app.yaml (%s) and cron.yaml (%s) must match' % (
app_yaml_version, cron_yaml_version))]
def _RunPresubmit(input_api, output_api):
_BuildServer(input_api)
return (
_WarnIfAppYamlHasntChanged(input_api, output_api) +
_CheckYamlConsistency(input_api, output_api) +
input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST, blacklist=BLACKLIST)
)
def CheckChangeOnUpload(input_api, output_api):
return _RunPresubmit(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _RunPresubmit(input_api, output_api)
| bsd-3-clause |
shlomif/PySolFC | pysollib/kivy/selecttree.py | 1 | 3420 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------#
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------#
# imports
# import tkFont
# Toolkit imports
from pysollib.kivy.tktree import MfxTreeInCanvas, MfxTreeLeaf, MfxTreeNode
# ************************************************************************
# * Nodes
# ************************************************************************
class SelectDialogTreeLeaf(MfxTreeLeaf):
def drawSymbol(self, x, y, **kw):
pass
'''
if self.tree.nodes.get(self.symbol_id) is not self:
self.symbol_id = self.tree.canvas.create_image(x, y,
image=self.tree.data.img[2 + (self.key is None)], anchor="nw")
self.tree.nodes[self.symbol_id] = self
'''
class SelectDialogTreeNode(MfxTreeNode):
def __init__(self, tree, text, select_func, expanded=0, parent_node=None):
MfxTreeNode.__init__(self, tree, parent_node,
text, key=None, expanded=expanded)
# callable or a tuple/list of MfxTreeNodes
self.select_func = select_func
def drawSymbol(self, x, y, **kw):
pass
'''
if self.tree.nodes.get(self.symbol_id) is not self:
self.symbol_id = self.tree.canvas.create_image(x, y,
image=self.tree.data.img[self.expanded], anchor="nw")
self.tree.nodes[self.symbol_id] = self
'''
def getContents(self):
# cached values
if self.subnodes is not None:
return self.subnodes
# print self.whoami()
if isinstance(self.select_func, (tuple, list)):
return self.select_func
return self._getContents()
def _getContents(self):
# subclass
return []
# ************************************************************************
# * Tree database
# ************************************************************************
class SelectDialogTreeData():
img = [] # loaded in Application.loadImages3
def __init__(self):
self.tree_xview = (0.0, 1.0)
self.tree_yview = (0.0, 1.0)
# ************************************************************************
# * Canvas that shows the tree (left side)
# ************************************************************************
class SelectDialogTreeCanvas(MfxTreeInCanvas):
def __init__(self, dialog, parent, key, default,
font=None, width=-1, height=-1, hbar=2, vbar=3):
pass
# not needed with kivy
'''
'''
| gpl-3.0 |
kimimj/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
logging.root.setLevel(logging.NOTSET)
handler = _get_handler(settings)
logging.root.addHandler(handler)
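# Usage sketch (illustrative, not part of the original module); the settings
# shown are ordinary Scrapy logging settings handled by _get_handler() below:
#
#     from scrapy.utils.log import configure_logging
#     configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s',
#                        'LOG_LEVEL': 'INFO'})
#
# Standalone scripts call this before starting a crawl; the scrapy command
# does it automatically.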
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Optional features available: %(features)s",
{'features': ", ".join(scrapy.optional_features)})
d = dict(overridden_settings(settings))
logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
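# Illustrative example (the logkws dict below is hypothetical) of how the
# adapter output feeds logger.log:
#
#     level, msg, args = logformatter_adapter(
#         {'level': logging.WARNING,
#          'msg': 'Dropped: %(exception)s',
#          'args': {'exception': 'DropItem()'}})
#     logger.log(level, msg, args)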
| bsd-3-clause |
jrahlf/3D-Non-Contact-Laser-Profilometer | xpcc/scons/site_tools/hosted.py | 1 | 2890 | #!/usr/bin/env python
#
# Copyright (c) 2009, Roboterclub Aachen e.V.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Roboterclub Aachen e.V. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ROBOTERCLUB AACHEN E.V. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ROBOTERCLUB AACHEN E.V. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import platform
from SCons.Script import *
# -----------------------------------------------------------------------------
def generate(env, **kw):
if platform.system() == 'Windows':
env.Append(ENV = {'PATH' : os.environ['PATH']})
env.Tool('default')
env.Append(CXXFLAGS = "/EHsc")
else:
env.Append(ENV = {'PATH' : os.environ['PATH']})
env.Tool('gcc')
env.Tool('g++')
env.Tool('gnulink')
env.Tool('ar')
env.Tool('as')
env['NM'] = "nm"
env['SIZE'] = "du -s -h"
# build messages
if ARGUMENTS.get('verbose') != '1':
env['CCCOMSTR'] = "Compiling C: $TARGET"
env['CXXCOMSTR'] = "Compiling C++: $TARGET"
env['ASCOMSTR'] = "Assembling: $TARGET"
env['ASPPCOMSTR'] = "Assembling: $TARGET"
env['LINKCOMSTR'] = "Linking: $TARGET"
env['RANLIBCOMSTR'] = "Indexing: $TARGET"
env['ARCOMSTR'] = "Create Library: $TARGET"
env['SIZECOMSTR'] = "Size after:"
env['SYMBOLSCOMSTR'] = "Show symbols for '$SOURCE':"
# flags for C and C++
env['CCFLAGS'] = [
"-funsigned-char",
"-funsigned-bitfields",
"-Wall",
"-Wextra",
"-Wundef",
"-ggdb",
"-DBASENAME=${SOURCE.file}",
]
# C++ flags
env['CXXFLAGS'] = [
"-std=gnu++0x",
# "-Weffc++",
"-Woverloaded-virtual",
]
def exists(env):
return env.Detect('g++')
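# Usage sketch (assumption: this file sits in a directory listed in the SCons
# toolpath); a SConstruct would load the tool roughly like this:
#
#     env = Environment(tools=['hosted'], toolpath=['scons/site_tools'])
#     env.Program('demo', ['demo.cpp'])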
| mit |
DazWorrall/ansible | lib/ansible/modules/packaging/os/redhat_subscription.py | 15 | 29767 | #!/usr/bin/python
# James Laska ([email protected])
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- access.redhat.com or Sat6 username
required: False
default: null
password:
description:
- access.redhat.com or Sat6 password
required: False
default: null
server_hostname:
description:
- Specify an alternative Red Hat Subscription Management or Sat6 server
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
server_insecure:
description:
- Enable or disable https server certificate verification when connecting to C(server_hostname)
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
rhsm_baseurl:
description:
- Specify CDN baseurl
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
server_proxy_hostname:
description:
- Specify a HTTP proxy hostname
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
version_added: "2.4"
server_proxy_port:
description:
- Specify a HTTP proxy port
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
version_added: "2.4"
server_proxy_user:
description:
- Specify a user for HTTP proxy with basic authentication
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
version_added: "2.4"
server_proxy_password:
description:
- Specify a password for HTTP proxy with basic authentication
required: False
default: Current value from C(/etc/rhsm/rhsm.conf) is the default
version_added: "2.4"
autosubscribe:
description:
- Upon successful registration, auto-consume available subscriptions
required: False
default: False
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
org_id:
description:
- Organization ID to use in conjunction with activationkey
required: False
default: null
version_added: "2.0"
environment:
description:
- Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
required: False
default: null
version_added: "2.2"
pool:
description:
- |
Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
possible, as it is much faster. Mutually exclusive with I(pool_ids).
required: False
default: '^$'
pool_ids:
description:
- |
Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
      C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
default: []
version_added: "2.4"
consumer_type:
description:
- The type of unit to register, defaults to system
required: False
default: null
version_added: "2.1"
consumer_name:
description:
- Name of the system to register, defaults to the hostname
required: False
default: null
version_added: "2.1"
consumer_id:
description:
- |
References an existing consumer ID to resume using a previous registration
for this system. If the system's identity certificate is lost or corrupted,
this option allows it to resume using its previous identity and subscriptions.
The default is to not specify a consumer ID so a new ID is created.
required: False
default: null
version_added: "2.1"
force_register:
description:
- Register the system even if it is already registered
required: False
default: False
version_added: "2.2"
'''
EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
redhat_subscription:
state: present
username: joe_user
password: somepass
autosubscribe: true
- name: Same as above but subscribe to a specific pool by ID.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids: 0123456789abcdef0123456789abcdef
- name: Register and subscribe to multiple pools.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef
- 1123456789abcdef0123456789abcdef
- name: Same as above but consume multiple entitlements.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef: 2
- 1123456789abcdef0123456789abcdef: 4
- name: Register and pull existing system data.
redhat_subscription:
state: present
username: joe_user
password: somepass
consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^Red Hat Enterprise Server$'
- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
redhat_subscription:
state: present
username: joe_user
password: somepass
environment: Library
autosubscribe: yes
'''
RETURN = '''
subscribed_pool_ids:
description: List of pool IDs to which system is now subscribed
returned: success
type: complex
contains: {
"8a85f9815ab905d3015ab928c7005de4": "1"
}
'''
import os
import re
import shutil
import tempfile
import types
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
SUBMAN_CMD = None
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
tmpfd, tmpfile = tempfile.mkstemp()
shutil.copy2(plugin_conf, tmpfile)
cfg = configparser.ConfigParser()
cfg.read([tmpfile])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
fd = open(tmpfile, 'w+')
cfg.write(fd)
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = configparser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value w/o having to standup some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser)
return cp
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHSM
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--server.hostname'.
for k, v in kwargs.items():
if re.search(r'^(server|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_', '.', 1), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
        Determine whether the current system is registered with RHSM.
Returns:
* Boolean - whether the current system is currently registered to
RHSM.
'''
args = [SUBMAN_CMD, 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register, environment,
rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
server_proxy_port, server_proxy_user, server_proxy_password):
'''
Register the current system to the provided RHSM or Sat6 server
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'register']
# Generate command arguments
if force_register:
args.extend(['--force'])
if rhsm_baseurl:
args.extend(['--baseurl', rhsm_baseurl])
if server_insecure:
args.extend(['--insecure'])
if server_hostname:
args.extend(['--serverurl', server_hostname])
if activationkey:
args.extend(['--activationkey', activationkey])
args.extend(['--org', org_id])
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
if consumer_type:
args.extend(['--type', consumer_type])
if consumer_name:
args.extend(['--name', consumer_name])
if consumer_id:
args.extend(['--consumerid', consumer_id])
if environment:
args.extend(['--environment', environment])
if server_proxy_hostname and server_proxy_port:
args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
if server_proxy_user:
args.extend(['--proxyuser', server_proxy_user])
if server_proxy_password:
args.extend(['--proxypassword', server_proxy_password])
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]
if items:
args = [SUBMAN_CMD, 'unsubscribe'] + items
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
return serials
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'unregister']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', False)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression. It matches regexp against available pool ids first.
If any pool ids match, subscribe to those pools and return.
If no pool ids match, then match regexp against available pool product
        names. Note this can still easily match many pools. Then subscribe
to those pools.
Since a pool id is a more specific match, we only fallback to matching
against names if we didn't match pool ids.
Raises:
* Exception - if error occurs while running command
'''
# See https://github.com/ansible/ansible/issues/19466
# subscribe to pools whose pool id matches regexp (and only the pool id)
subscribed_pool_ids = self.subscribe_pool(regexp)
# If we found any matches, we are done
# Don't attempt to match pools by product name
if subscribed_pool_ids:
return subscribed_pool_ids
# We didn't match any pool ids.
# Now try subscribing to pools based on product name match
# Note: This can match lots of product names.
subscribed_by_product_pool_ids = self.subscribe_product(regexp)
if subscribed_by_product_pool_ids:
return subscribed_by_product_pool_ids
# no matches
return []
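    # Illustrative behaviour (pool ids below are hypothetical): if the pool id
    # '8a85f9815ab905d3015ab928c7005de4' is available, subscribe('^8a85f981')
    # attaches by pool id and returns that id, whereas a regexp such as
    # '^Red Hat Enterprise' matches no pool id and falls back to product-name
    # matching instead.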
def subscribe_by_pool_ids(self, pool_ids):
for pool_id, quantity in pool_ids.items():
args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
return pool_ids
def subscribe_pool(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_pools(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def subscribe_product(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_products(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def update_subscriptions(self, regexp):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
serials = self.unsubscribe(serials=serials_to_remove)
subscribed_pool_ids = self.subscribe(regexp)
if subscribed_pool_ids or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
def update_subscriptions_by_pool_ids(self, pool_ids):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
existing_pools = {}
for p in consumed_pools:
existing_pools[p.get_pool_id()] = p.QuantityUsed
serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
serials = self.unsubscribe(serials=serials_to_remove)
missing_pools = {}
for pool_id, quantity in pool_ids.items():
if existing_pools.get(pool_id, 0) != quantity:
missing_pools[pool_id] = quantity
self.subscribe_by_pool_ids(missing_pools)
if missing_pools or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
'unsubscribed_serials': serials}
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def get_pool_id(self):
return getattr(self, 'PoolId', getattr(self, 'PoolID'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list(consumed)
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self, consumed=False):
"""
        Load the list of all available or consumed pools for the system into a data structure.
Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
            # An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':', 1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
# else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
def filter_pools(self, regexp='^$'):
'''
Return a list of RhsmPools whose pool id matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product.get_pool_id()):
yield product
def filter_products(self, regexp='^$'):
'''
Return a list of RhsmPools whose product name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
def main():
# Load RHSM configuration from file
rhsm = Rhsm(None)
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present',
choices=['present', 'absent']),
username=dict(default=None,
required=False),
password=dict(default=None,
required=False,
no_log=True),
server_hostname=dict(default=rhsm.config.get_option('server.hostname'),
required=False),
server_insecure=dict(default=rhsm.config.get_option('server.insecure'),
required=False),
rhsm_baseurl=dict(default=rhsm.config.get_option('rhsm.baseurl'),
required=False),
autosubscribe=dict(default=False,
type='bool'),
activationkey=dict(default=None,
required=False),
org_id=dict(default=None,
required=False),
environment=dict(default=None,
required=False, type='str'),
pool=dict(default='^$',
required=False,
type='str'),
pool_ids=dict(default=[],
required=False,
type='list'),
consumer_type=dict(default=None,
required=False),
consumer_name=dict(default=None,
required=False),
consumer_id=dict(default=None,
required=False),
force_register=dict(default=False,
type='bool'),
server_proxy_hostname=dict(default=rhsm.config.get_option('server.proxy_hostname'),
required=False),
server_proxy_port=dict(default=rhsm.config.get_option('server.proxy_port'),
required=False),
server_proxy_user=dict(default=rhsm.config.get_option('server.proxy_user'),
required=False),
server_proxy_password=dict(default=rhsm.config.get_option('server.proxy_password'),
required=False,
no_log=True),
),
required_together=[['username', 'password'], ['activationkey', 'org_id'],
['server_proxy_hostname', 'server_proxy_port'], ['server_proxy_user', 'server_proxy_password']],
mutually_exclusive=[['username', 'activationkey'], ['pool', 'pool_ids']],
required_if=[['state', 'present', ['username', 'activationkey'], True]],
)
rhsm.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
autosubscribe = module.params['autosubscribe']
activationkey = module.params['activationkey']
org_id = module.params['org_id']
environment = module.params['environment']
pool = module.params['pool']
pool_ids = {}
for value in module.params['pool_ids']:
if isinstance(value, dict):
if len(value) != 1:
module.fail_json(msg='Unable to parse pool_ids option.')
pool_id, quantity = value.items()[0]
else:
pool_id, quantity = value, 1
pool_ids[pool_id] = str(quantity)
consumer_type = module.params["consumer_type"]
consumer_name = module.params["consumer_name"]
consumer_id = module.params["consumer_id"]
force_register = module.params["force_register"]
server_proxy_hostname = module.params['server_proxy_hostname']
server_proxy_port = module.params['server_proxy_port']
server_proxy_user = module.params['server_proxy_user']
server_proxy_password = module.params['server_proxy_password']
global SUBMAN_CMD
SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
# Ensure system is registered
if state == 'present':
# Register system
if rhsm.is_registered and not force_register:
if pool != '^$' or pool_ids:
try:
if pool_ids:
result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
else:
result = rhsm.update_subscriptions(pool)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e))
else:
module.exit_json(**result)
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhsm.enable()
rhsm.configure(**module.params)
rhsm.register(username, password, autosubscribe, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register,
environment, rhsm_baseurl, server_insecure, server_hostname,
server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password)
if pool_ids:
subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
else:
subscribed_pool_ids = rhsm.subscribe(pool)
except Exception:
e = get_exception()
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e))
else:
module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhsm.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhsm.unsubscribe()
rhsm.unregister()
except Exception:
e = get_exception()
module.fail_json(msg="Failed to unregister: %s" % e)
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
if __name__ == '__main__':
main()
| gpl-3.0 |
alexallah/django | django/contrib/admindocs/urls.py | 97 | 1192 | from django.conf.urls import url
from django.contrib.admindocs import views
urlpatterns = [
url(r'^$',
views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
name='django-admindocs-docroot'),
url(r'^bookmarklets/$',
views.BookmarkletsView.as_view(),
name='django-admindocs-bookmarklets'),
url(r'^tags/$',
views.TemplateTagIndexView.as_view(),
name='django-admindocs-tags'),
url(r'^filters/$',
views.TemplateFilterIndexView.as_view(),
name='django-admindocs-filters'),
url(r'^views/$',
views.ViewIndexView.as_view(),
name='django-admindocs-views-index'),
url(r'^views/(?P<view>[^/]+)/$',
views.ViewDetailView.as_view(),
name='django-admindocs-views-detail'),
url(r'^models/$',
views.ModelIndexView.as_view(),
name='django-admindocs-models-index'),
url(r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.ModelDetailView.as_view(),
name='django-admindocs-models-detail'),
url(r'^templates/(?P<template>.*)/$',
views.TemplateDetailView.as_view(),
name='django-admindocs-templates'),
]
| bsd-3-clause |
chris-allan/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/templatetags/webclient_tags.py | 3 | 1519 | #!/usr/bin/env python
#
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import datetime
import traceback
import logging
from django.conf import settings
from django import template
from django.templatetags.static import PrefixNode
register = template.Library()
logger = logging.getLogger(__name__)
@register.tag()
def get_static_webclient_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.WEBCLIENT_STATIC_URL``.
Usage::
{% get_static_webclient_prefix [as varname] %}
Examples::
{% get_static_webclient_prefix %}
{% get_static_webclient_prefix as STATIC_WEBCLIENT_PREFIX %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_WEBCLIENT_URL")
| gpl-2.0 |
kingoflolz/hearthbreaker | jsonschema/exceptions.py | 35 | 6973 | from collections import defaultdict, deque
import itertools
import pprint
import textwrap
from jsonschema import _utils
from jsonschema.compat import PY3, iteritems
WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
STRONG_MATCHES = frozenset()
_unset = _utils.Unset()
class _Error(Exception):
def __init__(
self,
message,
validator=_unset,
path=(),
cause=None,
context=(),
validator_value=_unset,
instance=_unset,
schema=_unset,
schema_path=(),
parent=None,
):
super(_Error, self).__init__(
message,
validator,
path,
cause,
context,
validator_value,
instance,
schema,
schema_path,
parent,
)
self.message = message
self.path = self.relative_path = deque(path)
self.schema_path = self.relative_schema_path = deque(schema_path)
self.context = list(context)
self.cause = self.__cause__ = cause
self.validator = validator
self.validator_value = validator_value
self.instance = instance
self.schema = schema
self.parent = parent
for error in context:
error.parent = self
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.message)
def __str__(self):
return unicode(self).encode("utf-8")
def __unicode__(self):
essential_for_verbose = (
self.validator, self.validator_value, self.instance, self.schema,
)
if any(m is _unset for m in essential_for_verbose):
return self.message
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
return self.message + textwrap.dedent("""
Failed validating %r in schema%s:
%s
On instance%s:
%s
""".rstrip()
) % (
self.validator,
_utils.format_as_index(list(self.relative_schema_path)[:-1]),
_utils.indent(pschema),
_utils.format_as_index(self.relative_path),
_utils.indent(pinstance),
)
if PY3:
__str__ = __unicode__
@classmethod
def create_from(cls, other):
return cls(**other._contents())
@property
def absolute_path(self):
parent = self.parent
if parent is None:
return self.relative_path
path = deque(self.relative_path)
path.extendleft(reversed(parent.absolute_path))
return path
@property
def absolute_schema_path(self):
parent = self.parent
if parent is None:
return self.relative_schema_path
path = deque(self.relative_schema_path)
path.extendleft(reversed(parent.absolute_schema_path))
return path
def _set(self, **kwargs):
for k, v in iteritems(kwargs):
if getattr(self, k) is _unset:
setattr(self, k, v)
def _contents(self):
attrs = (
"message", "cause", "context", "validator", "validator_value",
"path", "schema_path", "instance", "schema", "parent",
)
return dict((attr, getattr(self, attr)) for attr in attrs)
class ValidationError(_Error):
pass
class SchemaError(_Error):
pass
class RefResolutionError(Exception):
pass
class UnknownType(Exception):
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
self.schema = schema
def __str__(self):
return unicode(self).encode("utf-8")
def __unicode__(self):
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
return textwrap.dedent("""
Unknown type %r for validator with schema:
%s
While checking instance:
%s
""".rstrip()
) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
if PY3:
__str__ = __unicode__
class FormatError(Exception):
def __init__(self, message, cause=None):
super(FormatError, self).__init__(message, cause)
self.message = message
self.cause = self.__cause__ = cause
def __str__(self):
return self.message.encode("utf-8")
def __unicode__(self):
return self.message
if PY3:
__str__ = __unicode__
class ErrorTree(object):
"""
ErrorTrees make it easier to check which validations failed.
"""
_instance = _unset
def __init__(self, errors=()):
self.errors = {}
self._contents = defaultdict(self.__class__)
for error in errors:
container = self
for element in error.path:
container = container[element]
container.errors[error.validator] = error
self._instance = error.instance
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
"""
return index in self._contents
def __getitem__(self, index):
"""
Retrieve the child tree one level down at the given ``index``.
If the index is not in the instance that this tree corresponds to and
is not known by this tree, whatever error would be raised by
``instance.__getitem__`` will be propagated (usually this is some
subclass of :class:`LookupError`.
"""
if self._instance is not _unset and index not in self:
self._instance[index]
return self._contents[index]
def __setitem__(self, index, value):
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
"""
return iter(self._contents)
def __len__(self):
"""
Same as :attr:`total_errors`.
"""
return self.total_errors
def __repr__(self):
return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
@property
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
return len(self.errors) + child_errors
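# Usage sketch (illustrative; the validator and instance are hypothetical):
#
#     from jsonschema import Draft4Validator
#     v = Draft4Validator({"items": {"type": "number"}})
#     tree = ErrorTree(v.iter_errors([1, "spam"]))
#     assert 1 in tree                     # index 1 failed validation
#     assert "type" in tree[1].errors      # and it failed the "type" check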
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
def relevance(error):
validator = error.validator
return -len(error.path), validator not in weak, validator in strong
return relevance
relevance = by_relevance()
def best_match(errors, key=relevance):
errors = iter(errors)
best = next(errors, None)
if best is None:
return
best = max(itertools.chain([best], errors), key=key)
while best.context:
best = min(best.context, key=key)
return best
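# Illustrative sketch: best_match(validator.iter_errors(instance)) returns the
# single ValidationError judged most relevant (deeper paths win and weak
# 'anyOf'/'oneOf' matches are de-prioritised), or None when there are no
# errors.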
| mit |
avanzosc/odoo-addons | contract_sale_school/wizards/contract_line_create.py | 1 | 2167 | # Copyright 2020 Oihane Crucelaegui - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class ContractLineCreate(models.TransientModel):
_name = "contract.line.create"
_description = "Contract Line Creation Wizard"
student_ids = fields.Many2many(comodel_name="res.partner")
product_id = fields.Many2one(
comodel_name="product.product", string="Product")
date_start = fields.Date()
date_end = fields.Date()
unit_price = fields.Float()
@api.model
def default_get(self, fields):
res = super(ContractLineCreate, self).default_get(fields)
if self.env.context.get("active_model") == "res.partner":
res.update({
"student_ids": [(6, 0, self.env.context.get("active_ids"))],
})
return res
@api.onchange("product_id")
def _onchange_product_id(self):
self.ensure_one()
self.unit_price = self.product_id.lst_price
@api.multi
def button_create_contract_line(self):
contract_line_obj = self.sudo().env["contract.line"]
academic_year = self.env["education.academic_year"].search([
("date_start", "<=", self.date_start),
("date_end", ">=", self.date_end)
], limit=1)
for student in self.student_ids.filtered(
lambda s: s.educational_category == "student"):
for payer in student.child2_ids.filtered("payer"):
contract_line_obj.create_contract_line(
payer.responsible_id, payer.payment_percentage,
self.product_id, 1.0, self.unit_price, 0.0,
self.product_id.company_id, academic_year,
self.product_id.center_id, student.current_course_id,
student, date_start=self.date_start,
date_end=self.date_end)
if self.product_id.education_type and (
self.product_id not in student.additional_product_ids):
student.write({
"additional_product_ids": [(4, self.product_id.id)],
})
| agpl-3.0 |
ahamilton55/ansible | lib/ansible/modules/network/panos/panos_lic.py | 78 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/MySQL-python-1.2.5/MySQLdb/__init__.py | 76 | 3229 | """MySQLdb - A DB API v2.0 compatible interface to MySQL.
This package is a wrapper around _mysql, which mostly implements the
MySQL C API.
connect() -- connects to server
See the C API specification and the MySQL documentation for more info
on other items.
For information on how MySQLdb handles type conversion, see the
MySQLdb.converters module.
"""
__revision__ = """$Revision$"""[11:-2]
from MySQLdb.release import __version__, version_info, __author__
import _mysql
if version_info != _mysql.version_info:
raise ImportError("this is MySQLdb version %s, but _mysql is version %r" %
(version_info, _mysql.version_info))
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
from _mysql import *
from MySQLdb.constants import FIELD_TYPE
from MySQLdb.times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
class DBAPISet(frozenset):
"""A special type of set for which A == x is true if A is a
DBAPISet and x is a member of that set."""
def __eq__(self, other):
if isinstance(other, DBAPISet):
return not self.difference(other)
return other in self
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def test_DBAPISet_set_equality():
assert STRING == STRING
def test_DBAPISet_set_inequality():
assert STRING != NUMBER
def test_DBAPISet_set_equality_membership():
assert FIELD_TYPE.VAR_STRING == STRING
def test_DBAPISet_set_inequality_membership():
assert FIELD_TYPE.DATE != STRING
def Binary(x):
return str(x)
def Connect(*args, **kwargs):
"""Factory function for connections.Connection."""
from MySQLdb.connections import Connection
return Connection(*args, **kwargs)
connect = Connection = Connect
__all__ = [ 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE',
'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks',
'TimestampFromTicks', 'DataError', 'DatabaseError', 'Error',
'FIELD_TYPE', 'IntegrityError', 'InterfaceError', 'InternalError',
'MySQLError', 'NULL', 'NUMBER', 'NotSupportedError', 'DBAPISet',
'OperationalError', 'ProgrammingError', 'ROWID', 'STRING', 'TIME',
'TIMESTAMP', 'Warning', 'apilevel', 'connect', 'connections',
'constants', 'converters', 'cursors', 'debug', 'escape', 'escape_dict',
'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'string_literal', 'threadsafety', 'version_info']
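# Illustrative connection sketch (host, user, passwd and db are placeholders):
#
#     import MySQLdb
#     conn = MySQLdb.connect(host="localhost", user="user",
#                            passwd="secret", db="test")
#     cur = conn.cursor()
#     cur.execute("SELECT VERSION()")
#     print(cur.fetchone())
#     conn.close()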
| gpl-2.0 |
Roboy/MKR1000_Lighthouse_Embed | tools/gcc-arm-none-eabi/arm-none-eabi/share/gdb/python/gdb/function/strfns.py | 137 | 2684 | # Useful gdb string convenience functions.
# Copyright (C) 2012-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""$_memeq, $_strlen, $_streq, $_regex"""
import gdb
import re
class _MemEq(gdb.Function):
"""$_memeq - compare bytes of memory
Usage:
$_memeq(a, b, len)
Returns:
True if len bytes at a and b compare equally.
"""
def __init__(self):
super(_MemEq, self).__init__("_memeq")
def invoke(self, a, b, length):
if length < 0:
raise ValueError("length must be non-negative")
if length == 0:
return True
# The argument(s) to vector are [low_bound,]high_bound.
byte_vector = gdb.lookup_type("char").vector(length - 1)
ptr_byte_vector = byte_vector.pointer()
a_ptr = a.reinterpret_cast(ptr_byte_vector)
b_ptr = b.reinterpret_cast(ptr_byte_vector)
return a_ptr.dereference() == b_ptr.dereference()
class _StrLen(gdb.Function):
"""$_strlen - compute string length
Usage:
$_strlen(a)
Returns:
Length of string a, assumed to be a string in the current language.
"""
def __init__(self):
super(_StrLen, self).__init__("_strlen")
def invoke(self, a):
s = a.string()
return len(s)
class _StrEq(gdb.Function):
"""$_streq - check string equality
Usage:
$_streq(a, b)
Returns:
True if a and b are identical strings in the current language.
Example (amd64-linux):
catch syscall open
cond $bpnum $_streq((char*) $rdi, "foo")
"""
def __init__(self):
super(_StrEq, self).__init__("_streq")
def invoke(self, a, b):
return a.string() == b.string()
class _RegEx(gdb.Function):
"""$_regex - check if a string matches a regular expression
Usage:
$_regex(string, regex)
Returns:
True if string str (in the current language) matches the
regular expression regex.
"""
def __init__(self):
super(_RegEx, self).__init__("_regex")
def invoke(self, string, regex):
s = string.string()
r = re.compile(regex.string())
return bool(r.match(s))
# GDB will import us automagically via gdb/__init__.py.
_MemEq()
_StrLen()
_StrEq()
_RegEx()
| gpl-3.0 |
beni55/networkx | networkx/convert.py | 12 | 13210 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
    If create_using.clear() works, assume it is a usable graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
data : a object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
dist-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph'): # data.graph should be dict-like
result.graph.update(data.graph)
if hasattr(data,'node'): # data.node should be dict-like
result.node.update( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
        If edge_data is None, the edge data in G is used to fill the values.
        If G is a multigraph, the edge data is a dict for each pair (u,v).
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
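# Illustrative example (tiny hypothetical graph): for G = nx.Graph([(0, 1)]),
# to_dict_of_dicts(G) returns {0: {1: {}}, 1: {0: {}}}, while
# to_dict_of_dicts(G, edge_data=1) returns the adjacency-matrix style
# {0: {1: 1}, 1: {0: 1}}.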
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,attr_dict=data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
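# --- Editor's addition: a hedged sketch of multigraph_input for
# from_dict_of_dicts(); the inner dicts below are keyed by edge key.
def _example_from_dict_of_dicts_multigraph():  # pragma: no cover
    import networkx as nx
    # Two parallel edges between 0 and 1 (keys 0 and 1), listed in both directions.
    dod = {0: {1: {0: {'weight': 1}, 1: {'weight': 2}}},
           1: {0: {0: {'weight': 1}, 1: {'weight': 2}}}}
    G = nx.from_dict_of_dicts(dod, create_using=nx.MultiGraph(),
                              multigraph_input=True)
    return G.number_of_edges()  # 2: the reverse direction is skipped via 'seen'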
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G
| bsd-3-clause |
thnee/ansible | lib/ansible/modules/storage/netapp/_sf_volume_access_group_manager.py | 59 | 8956 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_access_group_manager
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_access_group)
short_description: Manage SolidFire Volume Access Groups
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar (@timuster) <[email protected]>
description:
- Create, destroy, or update volume access groups on SolidFire
options:
state:
description:
- Whether the specified volume access group should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the volume access group. It is not required to be unique, but recommended.
required: true
initiators:
description:
- List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
volumes:
description:
- List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
virtual_network_id:
description:
- The ID of the SolidFire Virtual Network ID to associate the volume access group with.
virtual_network_tags:
description:
- The ID of the VLAN Virtual Network Tag to associate the volume access group with.
attributes:
description: List of Name/Value pairs in JSON object format.
volume_access_group_id:
description:
- The ID of the volume access group to modify or delete.
'''
EXAMPLES = """
- name: Create Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVolumeAccessGroup
volumes: [7,8]
- name: Modify Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
volume_access_group_id: 1
name: AnsibleVolumeAccessGroup-Renamed
attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
- name: Delete Volume Access Group
sf_volume_access_group_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
volume_access_group_id: 1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolumeAccessGroup(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
volume_access_group_id=dict(required=False, type='int', default=None),
initiators=dict(required=False, type='list', default=None),
volumes=dict(required=False, type='list', default=None),
virtual_network_id=dict(required=False, type='list', default=None),
virtual_network_tags=dict(required=False, type='list', default=None),
attributes=dict(required=False, type='dict', default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.volume_access_group_id = p['volume_access_group_id']
self.initiators = p['initiators']
self.volumes = p['volumes']
self.virtual_network_id = p['virtual_network_id']
self.virtual_network_tags = p['virtual_network_tags']
self.attributes = p['attributes']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_volume_access_group(self):
access_groups_list = self.sfe.list_volume_access_groups()
for group in access_groups_list.volume_access_groups:
if group.name == self.name:
# Update self.volume_access_group_id:
if self.volume_access_group_id is not None:
if group.volume_access_group_id == self.volume_access_group_id:
return group
else:
self.volume_access_group_id = group.volume_access_group_id
return group
return None
def create_volume_access_group(self):
try:
self.sfe.create_volume_access_group(name=self.name,
initiators=self.initiators,
volumes=self.volumes,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error creating volume access group %s: %s" %
(self.name, to_native(e)), exception=traceback.format_exc())
def delete_volume_access_group(self):
try:
self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
except Exception as e:
self.module.fail_json(msg="Error deleting volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)),
exception=traceback.format_exc())
def update_volume_access_group(self):
try:
self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
virtual_network_id=self.virtual_network_id,
virtual_network_tags=self.virtual_network_tags,
name=self.name,
initiators=self.initiators,
volumes=self.volumes,
attributes=self.attributes)
except Exception as e:
self.module.fail_json(msg="Error updating volume access group %s: %s" %
(self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
def apply(self):
changed = False
group_exists = False
update_group = False
group_detail = self.get_volume_access_group()
if group_detail:
group_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Check if we need to update the group
if self.volumes is not None and group_detail.volumes != self.volumes:
update_group = True
changed = True
elif self.initiators is not None and group_detail.initiators != self.initiators:
update_group = True
changed = True
elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
self.attributes is not None:
update_group = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not group_exists:
self.create_volume_access_group()
elif update_group:
self.update_volume_access_group()
elif self.state == 'absent':
self.delete_volume_access_group()
self.module.exit_json(changed=changed)
def main():
v = SolidFireVolumeAccessGroup()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
catmaid/CATMAID | django/applications/catmaid/control/user.py | 2 | 6080 | # -*- coding: utf-8 -*-
import json
from typing import Any, Dict, Iterable
from guardian.utils import get_anonymous_user
from django.http import HttpRequest, JsonResponse
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth import views as auth_views
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.auth.models import User
import django.contrib.auth.views as django_auth_views
from catmaid.control.authentication import (access_check, PermissionError)
from catmaid.control.common import get_request_bool
def not_anonymous(user):
"""Return true if the the user is neither Django's nor Guardian's anonymous
user.
"""
return user.is_authenticated and user != get_anonymous_user()
@user_passes_test(access_check)
def user_list(request:HttpRequest) -> JsonResponse:
"""List registered users in this CATMAID instance. Must be logged in.
    An administrator can export users including their encrypted password. This
    is meant to allow importing users into other CATMAID instances.
---
parameters:
- name: with_passwords
description: |
Export encrypted passwords. Requires admin access.
required: false
        type: boolean
default: false
"""
with_passwords = get_request_bool(request.GET, 'with_passwords', False)
if with_passwords:
# Make sure user is an admin and part of the staff
if not request.user.is_staff and not request.user.is_superuser:
raise PermissionError("Superuser permissions required to export "
"encrypted user passwords")
result = []
for u in User.objects.all().select_related('userprofile') \
.order_by('last_name', 'first_name'):
up = u.userprofile
user_data = {
"id": u.id,
"login": u.username,
"full_name": u.get_full_name(),
"first_name": u.first_name,
"last_name": u.last_name,
"color": (up.color.r, up.color.g, up.color.b),
"primary_group_id": up.primary_group_id,
}
if with_passwords:
            # Append encrypted user password
user_data['password'] = u.password
result.append(user_data)
return JsonResponse(result, safe=False)
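# --- Editor's addition: a hedged usage note for user_list(); the URL below is an
# assumption about the routing configuration, not taken from this file.
#
#   GET /user-list                        -> basic user listing
#   GET /user-list?with_passwords=true    -> adds the 'password' hashes (staff or
#                                            superuser only, otherwise PermissionError)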
@user_passes_test(access_check)
def user_list_datatable(request:HttpRequest) -> JsonResponse:
display_start = int(request.POST.get('iDisplayStart', 0))
display_length = int(request.POST.get('iDisplayLength', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
should_sort = request.POST.get('iSortCol_0', False)
user_query = User.objects.all()
# By default, there is no need to explicitly request a distinct result
distinct = False
# This field can be used to only return users that have used a certain
# annotation.
annotations = [v for k,v in request.POST.items()
if k.startswith('annotations[')]
for annotation in annotations:
user_query = user_query.filter(
classinstanceclassinstance__relation__relation_name='annotated_with',
classinstanceclassinstance__class_instance_b__name=annotation
)
# Make sure we only get distinct user names
distinct = True
# The neuron_id field can be used to constrain the result by only showing
# users that annotated a certain neuron.
neuron_annotated = request.POST.get('neuron_id', None)
if neuron_annotated:
user_query = user_query.filter(
classinstanceclassinstance__relation__relation_name='annotated_with',
classinstanceclassinstance__class_instance_a__id=neuron_annotated
)
# Make sure we only get distinct user names
distinct = True
if distinct:
user_query = user_query.distinct()
if should_sort:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions: Iterable[str] = [
request.POST.get('sSortDir_%d' % d, 'DESC')
for d in range(column_count)]
sorting_directions = map(lambda d: '-' if d.upper() == 'DESC' else '',
sorting_directions)
fields = ['username', 'first_name', 'last_name', 'id']
sorting_index = [int(request.POST.get('iSortCol_%d' % d))
for d in range(column_count)]
sorting_cols = map(lambda i: fields[i], sorting_index)
user_query = user_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
num_records = len(user_query)
result = list(user_query[display_start:display_start + display_length])
response:Dict[str, Any] = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
'aaData': []
}
for user in result:
response['aaData'] += [[
user.username,
user.first_name,
user.last_name,
user.id,
]]
return JsonResponse(response)
@user_passes_test(access_check)
def update_user_profile(request:HttpRequest) -> JsonResponse:
""" Allows users to update some of their user settings.
If the request is done by the anonymous user, nothing is updated, but
no error is raised.
"""
# Ignore anonymous user
if request.user == get_anonymous_user() or not request.user.is_authenticated:
return JsonResponse({'success': "The user profile of the " +
"anonymous user won't be updated"})
# Save user profile
request.user.userprofile.save()
return JsonResponse({'success': 'Updated user profile'})
class NonAnonymousPasswordChangeView(UserPassesTestMixin, auth_views.PasswordChangeView):
"""Only allow password changes for non-anonymous users.
"""
def test_func(self):
return not_anonymous(self.request.user)
def handle_no_permission(self):
return auth_views.redirect_to_login(self.request.get_full_path(),
self.get_login_url(), self.get_redirect_field_name())
| gpl-3.0 |
kawasaki2013/python-for-android-x86 | python-modules/twisted/twisted/python/urlpath.py | 81 | 3431 | # -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import urlparse
import urllib
class URLPath:
def __init__(self, scheme='', netloc='localhost', path='',
query='', fragment=''):
self.scheme = scheme or 'http'
self.netloc = netloc
self.path = path or '/'
self.query = query
self.fragment = fragment
_qpathlist = None
_uqpathlist = None
def pathList(self, unquote=0, copy=1):
if self._qpathlist is None:
self._qpathlist = self.path.split('/')
self._uqpathlist = map(urllib.unquote, self._qpathlist)
if unquote:
result = self._uqpathlist
else:
result = self._qpathlist
if copy:
return result[:]
else:
return result
def fromString(klass, st):
t = urlparse.urlsplit(st)
u = klass(*t)
return u
fromString = classmethod(fromString)
def fromRequest(klass, request):
return klass.fromString(request.prePathURL())
fromRequest = classmethod(fromRequest)
def _pathMod(self, newpathsegs, keepQuery):
if keepQuery:
query = self.query
else:
query = ''
return URLPath(self.scheme,
self.netloc,
'/'.join(newpathsegs),
query)
def sibling(self, path, keepQuery=0):
l = self.pathList()
l[-1] = path
return self._pathMod(l, keepQuery)
def child(self, path, keepQuery=0):
l = self.pathList()
if l[-1] == '':
l[-1] = path
else:
l.append(path)
return self._pathMod(l, keepQuery)
def parent(self, keepQuery=0):
l = self.pathList()
if l[-1] == '':
del l[-2]
else:
# We are a file, such as http://example.com/foo/bar
# our parent directory is http://example.com/
l.pop()
l[-1] = ''
return self._pathMod(l, keepQuery)
def here(self, keepQuery=0):
l = self.pathList()
if l[-1] != '':
l[-1] = ''
return self._pathMod(l, keepQuery)
def click(self, st):
"""Return a path which is the URL where a browser would presumably take
you if you clicked on a link with an HREF as given.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
if not scheme:
scheme = self.scheme
if not netloc:
netloc = self.netloc
if not path:
path = self.path
if not query:
query = self.query
elif path[0] != '/':
l = self.pathList()
l[-1] = path
path = '/'.join(l)
return URLPath(scheme,
netloc,
path,
query,
fragment)
def __str__(self):
x = urlparse.urlunsplit((
self.scheme, self.netloc, self.path,
self.query, self.fragment))
return x
def __repr__(self):
return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
% (self.scheme, self.netloc, self.path, self.query, self.fragment))
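# --- Editor's addition: a minimal sketch of the traversal helpers above; the
# example URL is hypothetical and the expected strings were traced by hand.
def _example_urlpath():  # pragma: no cover
    u = URLPath.fromString('http://example.com/foo/bar')
    child = u.child('baz')       # http://example.com/foo/bar/baz
    sibling = u.sibling('baz')   # http://example.com/foo/baz
    clicked = u.click('?q=1')    # same path, query replaced: .../foo/bar?q=1
    return str(child), str(sibling), str(clicked)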
| apache-2.0 |
ikezue/ardupilot-arducopter-3.1.0 | Tools/autotest/dump_logs.py | 229 | 1762 | #!/usr/bin/env python
# dump flash logs from SITL
# Andrew Tridgell, April 2013
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
import util
############## main program #############
parser = optparse.OptionParser(sys.argv[0])
parser.add_option("--cli", action='store_true', default=False, help='put us in the CLI menu in logs')
opts, args = parser.parse_args()
os.environ['PYTHONUNBUFFERED'] = '1'
def dump_logs(atype):
'''dump DataFlash logs'''
logfile = '%s.log' % atype
print("Dumping logs for %s to %s" % (atype, logfile))
sil = util.start_SIL(atype)
log = open(logfile, mode='w')
mavproxy = util.start_MAVProxy_SIL(atype, setup=True, logfile=log)
mavproxy.send('\n\n\n')
print("navigating menus")
mavproxy.expect(']')
mavproxy.send("logs\n")
if opts.cli:
mavproxy.interact()
return
mavproxy.expect("logs enabled:")
lognums = []
i = mavproxy.expect(["No logs", "(\d+) logs"])
if i == 0:
numlogs = 0
else:
numlogs = int(mavproxy.match.group(1))
for i in range(numlogs):
mavproxy.expect("Log (\d+)")
lognums.append(int(mavproxy.match.group(1)))
mavproxy.expect("Log]")
for i in range(numlogs):
print("Dumping log %u (i=%u)" % (lognums[i], i))
mavproxy.send("dump %u\n" % lognums[i])
mavproxy.expect("logs enabled:", timeout=120)
mavproxy.expect("Log]")
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
log.close()
print("Saved log for %s to %s" % (atype, logfile))
return True
vehicle = os.path.basename(os.getcwd())
dump_logs(vehicle)
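# --- Editor's addition: a hedged usage note; the relative paths below are
# assumptions about the usual ArduPilot source layout.
#
#   cd ArduCopter && ../Tools/autotest/dump_logs.py          # dump all logs to ArduCopter.log
#   cd ArduCopter && ../Tools/autotest/dump_logs.py --cli    # stop in the interactive log CLI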
| gpl-3.0 |
coen-hyde/dotfiles | libs/eb/lib/aws/requests/api.py | 32 | 4285 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      >>> req
      <Response [200]>
"""
session = sessions.Session()
return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
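# --- Editor's addition: a minimal sketch of the helpers defined above; the
# httpbin.org URLs are stand-ins, and the calls perform real HTTP requests.
def _example_api_usage():  # pragma: no cover
    r = get('http://httpbin.org/get', params={'q': 'example'})
    created = post('http://httpbin.org/post', data={'key': 'value'})
    return r.status_code, created.status_code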
| mit |
taotie12010/bigfour | cms/djangoapps/contentstore/management/commands/export_all_courses.py | 91 | 2323 | """
Script for exporting all courseware from Mongo to a directory and listing the courses which failed to export
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
"""
Export all courses from mongo to the specified data directory and list the courses which failed to export
"""
help = 'Export all courses from mongo to the specified data directory and list the courses which failed to export'
def handle(self, *args, **options):
"""
Execute the command
"""
if len(args) != 1:
raise CommandError("export requires one argument: <output path>")
output_path = args[0]
courses, failed_export_courses = export_courses_to_output_path(output_path)
print("=" * 80)
print(u"=" * 30 + u"> Export summary")
print(u"Total number of courses to export: {0}".format(len(courses)))
print(u"Total number of courses which failed to export: {0}".format(len(failed_export_courses)))
print(u"List of export failed courses ids:")
print(u"\n".join(failed_export_courses))
print("=" * 80)
def export_courses_to_output_path(output_path):
"""
Export all courses to target directory and return the list of courses which failed to export
"""
content_store = contentstore()
module_store = modulestore()
root_dir = output_path
courses = module_store.get_courses()
course_ids = [x.id for x in courses]
failed_export_courses = []
for course_id in course_ids:
print(u"-" * 80)
print(u"Exporting course id = {0} to {1}".format(course_id, output_path))
try:
course_dir = course_id.to_deprecated_string().replace('/', '...')
export_course_to_xml(module_store, content_store, course_id, root_dir, course_dir)
except Exception as err: # pylint: disable=broad-except
failed_export_courses.append(unicode(course_id))
print(u"=" * 30 + u"> Oops, failed to export {0}".format(course_id))
print(u"Error:")
print(err)
return courses, failed_export_courses
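# --- Editor's addition: a hedged invocation example; the settings flag and
# output path are assumptions, not taken from the original file.
#
#   python manage.py cms --settings=aws export_all_courses /tmp/exported_courses
#
# Each course is written to <output path>/<org>...<course>...<run>/ and a summary
# of the courses that failed to export is printed at the end.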
| agpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pythonwin/pywin/tools/browser.py | 17 | 11796 | # basic module browser.
# usage:
# >>> import browser
# >>> browser.Browse()
# or
# >>> browser.Browse(your_module)
import sys
import types
import __main__
import win32ui
from pywin.mfc import dialog
import hierlist
special_names = [ '__doc__', '__name__', '__self__' ]
#
# HierList items
class HLIPythonObject(hierlist.HierListItem):
def __init__(self, myobject=None, name=None ):
hierlist.HierListItem.__init__(self)
self.myobject = myobject
self.knownExpandable = None
if name:
self.name=name
else:
try:
self.name=myobject.__name__
except (AttributeError, TypeError):
try:
r = repr(myobject)
if len(r)>20:
r = r[:20] + "..."
self.name=r
except (AttributeError, TypeError):
self.name="???"
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __repr__(self):
try:
type = self.GetHLIType()
except:
type = "Generic"
return "HLIPythonObject("+type+") - name: "+ self.name + " object: " + repr(self.myobject)
def GetText(self):
try:
return str(self.name) + ' (' + self.GetHLIType() + ')'
except AttributeError:
return str(self.name) + ' = ' + repr(self.myobject)
def InsertDocString(self, lst):
ob = None
try:
ob = self.myobject.__doc__
except (AttributeError, TypeError):
pass
# I don't quite grok descriptors enough to know how to
# best hook them up. Eg:
# >>> object.__getattribute__.__class__.__doc__
# <attribute '__doc__' of 'wrapper_descriptor' objects>
if ob and isinstance(ob, str):
lst.insert(0, HLIDocString( ob, "Doc" ))
def GetSubList(self):
ret = []
try:
for (key, ob) in self.myobject.__dict__.iteritems():
if key not in special_names:
ret.append(MakeHLI( ob, key ) )
except (AttributeError, TypeError):
pass
try:
for name in self.myobject.__methods__:
ret.append(HLIMethod( name )) # no MakeHLI, as cant auto detect
except (AttributeError, TypeError):
pass
try:
for member in self.myobject.__members__:
if not member in special_names:
ret.append(MakeHLI(getattr(self.myobject, member), member))
except (AttributeError, TypeError):
pass
ret.sort()
self.InsertDocString(ret)
return ret
    # if the object has a dict, it is expandable.
def IsExpandable(self):
if self.knownExpandable is None:
self.knownExpandable = self.CalculateIsExpandable()
return self.knownExpandable
def CalculateIsExpandable(self):
if hasattr(self.myobject, '__doc__'):
return 1
try:
for key in self.myobject.__dict__.iterkeys():
if key not in special_names:
return 1
except (AttributeError, TypeError):
pass
try:
self.myobject.__methods__
return 1
except (AttributeError, TypeError):
pass
try:
for item in self.myobject.__members__:
if item not in special_names:
return 1
except (AttributeError, TypeError):
pass
return 0
def GetBitmapColumn(self):
if self.IsExpandable():
return 0
else:
return 4
def TakeDefaultAction(self):
ShowObject(self.myobject, self.name)
class HLIDocString(HLIPythonObject):
def GetHLIType(self):
return "DocString"
def GetText(self):
return self.myobject.strip()
def IsExpandable(self):
return 0
def GetBitmapColumn(self):
return 6
class HLIModule(HLIPythonObject):
def GetHLIType(self):
return "Module"
class HLIFrame(HLIPythonObject):
def GetHLIType(self):
return "Stack Frame"
class HLITraceback(HLIPythonObject):
def GetHLIType(self):
return "Traceback"
class HLIClass(HLIPythonObject):
def GetHLIType(self):
return "Class"
def GetSubList(self):
ret = []
for base in self.myobject.__bases__:
ret.append( MakeHLI(base, 'Base class: ' + base.__name__ ) )
ret = ret + HLIPythonObject.GetSubList(self)
return ret
class HLIMethod(HLIPythonObject):
# myobject is just a string for methods.
def GetHLIType(self):
return "Method"
def GetText(self):
return "Method: " + self.myobject + '()'
class HLICode(HLIPythonObject):
def GetHLIType(self):
return "Code"
def IsExpandable(self):
return self.myobject
def GetSubList(self):
ret = []
ret.append( MakeHLI( self.myobject.co_consts, "Constants (co_consts)" ))
ret.append( MakeHLI( self.myobject.co_names, "Names (co_names)" ))
ret.append( MakeHLI( self.myobject.co_filename, "Filename (co_filename)" ))
ret.append( MakeHLI( self.myobject.co_argcount, "Number of args (co_argcount)"))
ret.append( MakeHLI( self.myobject.co_varnames, "Param names (co_varnames)"))
return ret
class HLIInstance(HLIPythonObject):
def GetHLIType(self):
return "Instance"
def GetText(self):
return str(self.name) + ' (Instance of class ' + str(self.myobject.__class__.__name__) + ')'
def IsExpandable(self):
return 1
def GetSubList(self):
ret = []
ret.append( MakeHLI( self.myobject.__class__) )
ret = ret + HLIPythonObject.GetSubList(self)
return ret
class HLIBuiltinFunction(HLIPythonObject):
def GetHLIType(self):
return "Builtin Function"
class HLIFunction(HLIPythonObject):
def GetHLIType(self):
return "Function"
def IsExpandable(self):
return 1
def GetSubList(self):
ret = []
# ret.append( MakeHLI( self.myobject.func_argcount, "Arg Count" ))
try:
ret.append( MakeHLI( self.myobject.func_argdefs, "Arg Defs" ))
except AttributeError:
pass
try:
code = self.myobject.__code__
globs = self.myobject.__globals__
except AttributeError:
# must be py2.5 or earlier...
code = self.myobject.func_code
globs = self.myobject.func_globals
ret.append(MakeHLI(code, "Code" ))
ret.append(MakeHLI(globs, "Globals" ))
self.InsertDocString(ret)
return ret
class HLISeq(HLIPythonObject):
def GetHLIType(self):
return "Sequence (abstract!)"
def IsExpandable(self):
return len(self.myobject)>0
def GetSubList(self):
ret = []
pos=0
for item in self.myobject:
ret.append(MakeHLI( item, '['+str(pos)+']' ) )
pos=pos+1
self.InsertDocString(ret)
return ret
class HLIList(HLISeq):
def GetHLIType(self):
return "List"
class HLITuple(HLISeq):
def GetHLIType(self):
return "Tuple"
class HLIDict(HLIPythonObject):
def GetHLIType(self):
return "Dict"
def IsExpandable(self):
try:
self.myobject.__doc__
return 1
except (AttributeError, TypeError):
return len(self.myobject) > 0
def GetSubList(self):
ret = []
keys = list(self.myobject.keys())
keys.sort()
for key in keys:
ob = self.myobject[key]
ret.append(MakeHLI( ob, str(key) ) )
self.InsertDocString(ret)
return ret
# In Python 1.6, strings and Unicode have builtin methods, but we don't really want to see these
class HLIString(HLIPythonObject):
def IsExpandable(self):
return 0
TypeMap = { type : HLIClass,
types.FunctionType: HLIFunction,
tuple: HLITuple,
dict: HLIDict,
list: HLIList,
types.ModuleType: HLIModule,
types.CodeType : HLICode,
types.BuiltinFunctionType : HLIBuiltinFunction,
types.FrameType : HLIFrame,
types.TracebackType : HLITraceback,
str : HLIString,
unicode : HLIString,
int: HLIPythonObject,
long: HLIPythonObject,
bool: HLIPythonObject,
float: HLIPythonObject,
}
def MakeHLI( ob, name=None ):
try:
cls = TypeMap[type(ob)]
except KeyError:
# hrmph - this check gets more and more bogus as Python
# improves. Its possible we should just *always* use
# HLIInstance?
if hasattr(ob, '__class__'): # 'new style' class
cls = HLIInstance
else:
cls = HLIPythonObject
return cls( ob, name )
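# --- Editor's addition: a small sketch of the type -> HLI mapping above; no GUI
# objects are created here, only the hierarchy-list item wrappers.
def _example_make_hli():
    item = MakeHLI({'a': 1, 'b': 2}, 'sample dict')
    # item is an HLIDict; GetText() renders "sample dict (Dict)" and GetSubList()
    # yields one child HLI per key (plus a leading DocString entry).
    return item.GetText(), item.GetSubList()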
#########################################
#
# Dialog related.
class DialogShowObject(dialog.Dialog):
def __init__(self, object, title):
self.object = object
self.title = title
dialog.Dialog.__init__(self, win32ui.IDD_LARGE_EDIT)
def OnInitDialog(self):
import re
self.SetWindowText(self.title)
self.edit = self.GetDlgItem(win32ui.IDC_EDIT1)
try:
strval = str(self.object)
except:
t, v, tb = sys.exc_info()
strval = "Exception getting object value\n\n%s:%s" % (t, v)
tb = None
strval = re.sub('\n','\r\n', strval)
self.edit.ReplaceSel(strval)
def ShowObject(object, title):
dlg = DialogShowObject(object, title)
dlg.DoModal()
# And some mods for a sizable dialog from Sam Rushing!
import win32con
import win32api
import commctrl
class dynamic_browser (dialog.Dialog):
style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
cs = (
win32con.WS_CHILD |
win32con.WS_VISIBLE |
commctrl.TVS_HASLINES |
commctrl.TVS_LINESATROOT |
commctrl.TVS_HASBUTTONS
)
dt = [
["Python Object Browser", (0, 0, 200, 200), style, None, (8, "MS Sans Serif")],
["SysTreeView32", None, win32ui.IDC_LIST1, (0, 0, 200, 200), cs]
]
def __init__ (self, hli_root):
dialog.Dialog.__init__ (self, self.dt)
self.hier_list = hierlist.HierListWithItems (
hli_root,
win32ui.IDB_BROWSER_HIER
)
self.HookMessage (self.on_size, win32con.WM_SIZE)
def OnInitDialog (self):
self.hier_list.HierInit (self)
return dialog.Dialog.OnInitDialog (self)
def OnOK(self):
self.hier_list.HierTerm()
self.hier_list = None
return self._obj_.OnOK()
def OnCancel(self):
self.hier_list.HierTerm()
self.hier_list = None
return self._obj_.OnCancel()
def on_size (self, params):
lparam = params[3]
w = win32api.LOWORD(lparam)
h = win32api.HIWORD(lparam)
self.GetDlgItem (win32ui.IDC_LIST1).MoveWindow((0,0,w,h))
def Browse (ob=__main__):
" Browse the argument, or the main dictionary "
root = MakeHLI (ob, 'root')
if not root.IsExpandable():
raise TypeError("Browse() argument must have __dict__ attribute, or be a Browser supported type")
dlg = dynamic_browser (root)
dlg.CreateWindow()
#
#
# Classes for using the browser in an MDI window, rather than a dialog
#
from pywin.mfc import docview
class BrowserTemplate(docview.DocTemplate):
def __init__(self):
docview.DocTemplate.__init__(self, win32ui.IDR_PYTHONTYPE, BrowserDocument, None, BrowserView)
def OpenObject(self, root): # Use this instead of OpenDocumentFile.
# Look for existing open document
for doc in self.GetDocumentList():
if doc.root==root:
doc.GetFirstView().ActivateFrame()
return doc
# not found - new one.
doc = BrowserDocument(self, root)
frame = self.CreateNewFrame(doc)
doc.OnNewDocument()
self.InitialUpdateFrame(frame, doc, 1)
return doc
class BrowserDocument (docview.Document):
def __init__(self, template, root):
docview.Document.__init__(self, template)
self.root = root
self.SetTitle("Browser: " + root.name)
def OnOpenDocument (self, name):
raise TypeError("This template can not open files")
return 0
class BrowserView(docview.TreeView):
def OnInitialUpdate(self):
import commctrl
rc = self._obj_.OnInitialUpdate()
list=hierlist.HierListWithItems( self.GetDocument().root, win32ui.IDB_BROWSER_HIER, win32ui.AFX_IDW_PANE_FIRST)
list.HierInit(self.GetParent())
list.SetStyle(commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS)
return rc
template = None
def MakeTemplate():
global template
if template is None:
template = BrowserTemplate() #win32ui.IDR_PYTHONTYPE, BrowserDocument, None, BrowserView)
def BrowseMDI(ob=__main__):
"""Browse an object using an MDI window.
"""
MakeTemplate()
root = MakeHLI(ob, repr(ob))
if not root.IsExpandable():
raise TypeError("Browse() argument must have __dict__ attribute, or be a Browser supported type")
template.OpenObject(root)
| gpl-3.0 |
Andrew-Katcha/storyteller | env/lib/python3.4/site-packages/setuptools/tests/test_resources.py | 345 | 23973 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove
import os
import sys
import tempfile
import shutil
from unittest import TestCase
import pkg_resources
from pkg_resources import (parse_requirements, VersionConflict, parse_version,
Distribution, EntryPoint, Requirement, safe_version, safe_name,
WorkingSet)
from setuptools.command.easy_install import (get_script_header, is_sh,
nt_quote_arg)
from setuptools.compat import StringIO, iteritems
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
def safe_repr(obj, short=False):
""" copied from Python2.7"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < pkg_resources._MAX_LENGTH:
return result
return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...'
class Metadata(pkg_resources.EmptyProvider):
"""Mock object to return metadata as if from an on-disk distribution"""
def __init__(self,*pairs):
self.metadata = dict(pairs)
def has_metadata(self,name):
return name in self.metadata
def get_metadata(self,name):
return self.metadata[name]
def get_metadata_lines(self,name):
return pkg_resources.yield_lines(self.get_metadata(name))
dist_from_fn = pkg_resources.Distribution.from_filename
class DistroTests(TestCase):
def testCollection(self):
# empty path should produce no distributions
ad = pkg_resources.Environment([], platform=None, python=None)
self.assertEqual(list(ad), [])
self.assertEqual(ad['FooPkg'],[])
ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))
# Name is in there now
self.assertTrue(ad['FooPkg'])
# But only 1 package
self.assertEqual(list(ad), ['foopkg'])
# Distributions sort by version
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
)
# Removing a distribution leaves sequence alone
ad.remove(ad['FooPkg'][1])
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.2']
)
# And inserting adds them in order
ad.add(dist_from_fn("FooPkg-1.9.egg"))
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
)
ws = WorkingSet([])
foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
req, = parse_requirements("FooPkg>=1.3")
# Nominal case: no distros on path, should yield all applicable
self.assertEqual(ad.best_match(req,ws).version, '1.9')
# If a matching distro is already installed, should return only that
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
# If the first matching distro is unsuitable, it's a version conflict
ws = WorkingSet([])
ws.add(foo12)
ws.add(foo14)
self.assertRaises(VersionConflict, ad.best_match, req, ws)
# If more than one match on the path, the first one takes precedence
ws = WorkingSet([])
ws.add(foo14)
ws.add(foo12)
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
def checkFooPkg(self,d):
self.assertEqual(d.project_name, "FooPkg")
self.assertEqual(d.key, "foopkg")
self.assertEqual(d.version, "1.3-1")
self.assertEqual(d.py_version, "2.4")
self.assertEqual(d.platform, "win32")
self.assertEqual(d.parsed_version, parse_version("1.3-1"))
def testDistroBasics(self):
d = Distribution(
"/some/path",
project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
)
self.checkFooPkg(d)
d = Distribution("/some/path")
self.assertEqual(d.py_version, sys.version[:3])
self.assertEqual(d.platform, None)
def testDistroParse(self):
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg")
self.checkFooPkg(d)
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg-info")
self.checkFooPkg(d)
def testDistroMetadata(self):
d = Distribution(
"/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
metadata = Metadata(
('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
)
)
self.checkFooPkg(d)
def distRequires(self, txt):
return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
def checkRequires(self, dist, txt, extras=()):
self.assertEqual(
list(dist.requires(extras)),
list(parse_requirements(txt))
)
def testDistroDependsSimple(self):
for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
self.checkRequires(self.distRequires(v), v)
def testResolve(self):
ad = pkg_resources.Environment([])
ws = WorkingSet([])
# Resolving no requirements -> nothing to install
self.assertEqual(list(ws.resolve([],ad)), [])
# Request something not in the collection -> DistributionNotFound
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
)
Foo = Distribution.from_filename(
"/foo_dir/Foo-1.2.egg",
metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
)
ad.add(Foo)
ad.add(Distribution.from_filename("Foo-0.9.egg"))
# Request thing(s) that are available -> list to activate
for i in range(3):
targets = list(ws.resolve(parse_requirements("Foo"), ad))
self.assertEqual(targets, [Foo])
list(map(ws.add,targets))
self.assertRaises(VersionConflict, ws.resolve,
parse_requirements("Foo==0.9"), ad)
ws = WorkingSet([]) # reset
# Request an extra that causes an unresolved dependency for "Baz"
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
)
Baz = Distribution.from_filename(
"/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
)
ad.add(Baz)
# Activation list now includes resolved dependency
self.assertEqual(
list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
)
# Requests for conflicting versions produce VersionConflict
self.assertRaises(VersionConflict,
ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
def testDistroDependsOptions(self):
d = self.distRequires("""
Twisted>=1.5
[docgen]
ZConfig>=2.0
docutils>=0.3
[fastcgi]
fcgiapp>=0.1""")
self.checkRequires(d,"Twisted>=1.5")
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
["docgen","fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
["fastcgi", "docgen"]
)
self.assertRaises(pkg_resources.UnknownExtra, d.requires, ["foo"])
class EntryPointTests(TestCase):
def assertfields(self, ep):
self.assertEqual(ep.name,"foo")
self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
self.assertEqual(ep.attrs, ("EntryPointTests",))
self.assertEqual(ep.extras, ("x",))
self.assertTrue(ep.load() is EntryPointTests)
self.assertEqual(
str(ep),
"foo = setuptools.tests.test_resources:EntryPointTests [x]"
)
def setUp(self):
self.dist = Distribution.from_filename(
"FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))
def testBasics(self):
ep = EntryPoint(
"foo", "setuptools.tests.test_resources", ["EntryPointTests"],
["x"], self.dist
)
self.assertfields(ep)
def testParse(self):
s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
ep = EntryPoint.parse(s, self.dist)
self.assertfields(ep)
ep = EntryPoint.parse("bar baz= spammity[PING]")
self.assertEqual(ep.name,"bar baz")
self.assertEqual(ep.module_name,"spammity")
self.assertEqual(ep.attrs, ())
self.assertEqual(ep.extras, ("ping",))
ep = EntryPoint.parse(" fizzly = wocka:foo")
self.assertEqual(ep.name,"fizzly")
self.assertEqual(ep.module_name,"wocka")
self.assertEqual(ep.attrs, ("foo",))
self.assertEqual(ep.extras, ())
def testRejects(self):
for ep in [
"foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
]:
try: EntryPoint.parse(ep)
except ValueError: pass
else: raise AssertionError("Should've been bad", ep)
def checkSubMap(self, m):
self.assertEqual(len(m), len(self.submap_expect))
for key, ep in iteritems(self.submap_expect):
self.assertEqual(repr(m.get(key)), repr(ep))
submap_expect = dict(
feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']),
feature3=EntryPoint('feature3', 'this.module', extras=['something'])
)
submap_str = """
# define features for blah blah
feature1 = somemodule:somefunction
feature2 = another.module:SomeClass [extra1,extra2]
feature3 = this.module [something]
"""
def testParseList(self):
self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
self.assertRaises(ValueError, EntryPoint.parse_group, "x",
["foo=baz", "foo=bar"])
def testParseMap(self):
m = EntryPoint.parse_map({'xyz':self.submap_str})
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
class RequirementsTests(TestCase):
def testBasics(self):
r = Requirement.parse("Twisted>=1.2")
self.assertEqual(str(r),"Twisted>=1.2")
self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
def testOrdering(self):
r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
self.assertEqual(r1,r2)
self.assertEqual(str(r1),str(r2))
self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")
def testBasicContains(self):
r = Requirement("Twisted", [('>=','1.2')], ())
foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
twist11 = Distribution.from_filename("Twisted-1.1.egg")
twist12 = Distribution.from_filename("Twisted-1.2.egg")
self.assertTrue(parse_version('1.2') in r)
self.assertTrue(parse_version('1.1') not in r)
self.assertTrue('1.2' in r)
self.assertTrue('1.1' not in r)
self.assertTrue(foo_dist not in r)
self.assertTrue(twist11 not in r)
self.assertTrue(twist12 in r)
def testAdvancedContains(self):
r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
self.assertTrue(v in r, (v,r))
for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
self.assertTrue(v not in r, (v,r))
def testOptionsAndHashing(self):
r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
self.assertEqual(r1,r2)
self.assertEqual(r1,r3)
self.assertEqual(r1.extras, ("foo","bar"))
self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized
self.assertEqual(hash(r1), hash(r2))
self.assertEqual(
hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
frozenset(["foo","bar"])))
)
def testVersionEquality(self):
r1 = Requirement.parse("foo==0.3a2")
r2 = Requirement.parse("foo!=0.3a4")
d = Distribution.from_filename
self.assertTrue(d("foo-0.3a4.egg") not in r1)
self.assertTrue(d("foo-0.3a1.egg") not in r1)
self.assertTrue(d("foo-0.3a4.egg") not in r2)
self.assertTrue(d("foo-0.3a2.egg") in r1)
self.assertTrue(d("foo-0.3a2.egg") in r2)
self.assertTrue(d("foo-0.3a3.egg") in r2)
self.assertTrue(d("foo-0.3a5.egg") in r2)
def testSetuptoolsProjectName(self):
"""
The setuptools project should implement the setuptools package.
"""
self.assertEqual(
Requirement.parse('setuptools').project_name, 'setuptools')
# setuptools 0.7 and higher means setuptools.
self.assertEqual(
Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')
class ParseTests(TestCase):
def testEmptyParse(self):
self.assertEqual(list(parse_requirements('')), [])
def testYielding(self):
for inp,out in [
([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
(['x\n\n','y'], ['x','y']),
]:
self.assertEqual(list(pkg_resources.yield_lines(inp)),out)
def testSplitting(self):
sample = """
x
[Y]
z
a
[b ]
# foo
c
[ d]
[q]
v
"""
self.assertEqual(list(pkg_resources.split_sections(sample)),
[(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
)
self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))
def testSafeName(self):
self.assertEqual(safe_name("adns-python"), "adns-python")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
self.assertNotEqual(safe_name("peak.web"), "peak-web")
def testSafeVersion(self):
self.assertEqual(safe_version("1.2-1"), "1.2-1")
self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
self.assertEqual(safe_version("peak.web"), "peak.web")
def testSimpleRequirements(self):
self.assertEqual(
list(parse_requirements('Twis-Ted>=1.2-1')),
[Requirement('Twis-Ted',[('>=','1.2-1')], ())]
)
self.assertEqual(
list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
[Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
)
self.assertEqual(
Requirement.parse("FooBar==1.99a3"),
Requirement("FooBar", [('==','1.99a3')], ())
)
self.assertRaises(ValueError,Requirement.parse,">=2.3")
self.assertRaises(ValueError,Requirement.parse,"x\\")
self.assertRaises(ValueError,Requirement.parse,"x==2 q")
self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
self.assertRaises(ValueError,Requirement.parse,"#")
def testVersionEquality(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertEqual(p1,p2, (s1,s2,p1,p2))
c('1.2-rc1', '1.2rc1')
c('0.4', '0.4.0')
c('0.4.0.0', '0.4.0')
c('0.4.0-0', '0.4-0')
c('0pl1', '0.0pl1')
c('0pre1', '0.0c1')
c('0.0.0preview1', '0c1')
c('0.0c1', '0-rc1')
c('1.2a1', '1.2.a.1')
c('1.2...a', '1.2a')
def testVersionOrdering(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertTrue(p1<p2, (s1,s2,p1,p2))
c('2.1','2.1.1')
c('2a1','2b0')
c('2a1','2.1')
c('2.3a1', '2.3')
c('2.1-1', '2.1-2')
c('2.1-1', '2.1.1')
c('2.1', '2.1pl4')
c('2.1a0-20040501', '2.1')
c('1.1', '02.1')
c('A56','B27')
c('3.2', '3.2.pl0')
c('3.2-1', '3.2pl1')
c('3.2pl1', '3.2pl1-1')
c('0.4', '4.0')
c('0.0.4', '0.4.0')
c('0pl1', '0.4pl1')
c('2.1.0-rc1','2.1.0')
c('2.1dev','2.1a0')
torture ="""
0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
0.77.2-1 0.77.1-1 0.77.0-1
""".split()
for p,v1 in enumerate(torture):
for v2 in torture[p+1:]:
c(v2,v1)
class ScriptHeaderTests(TestCase):
non_ascii_exe = '/Users/José/bin/python'
exe_with_spaces = r'C:\Program Files\Python33\python.exe'
def test_get_script_header(self):
if not sys.platform.startswith('java') or not is_sh(sys.executable):
# This test is for non-Jython platforms
expected = '#!%s\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/local/bin/python'),
expected)
expected = '#!%s -x\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/bin/python -x'),
expected)
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
candidate = get_script_header('#!/usr/bin/python',
executable=self.exe_with_spaces)
self.assertEqual(candidate, '#!"%s"\n' % self.exe_with_spaces)
def test_get_script_header_jython_workaround(self):
# This test doesn't work with Python 3 in some locales
if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
in (None, "C", "POSIX")):
return
class java:
class lang:
class System:
@staticmethod
def getProperty(property):
return ""
sys.modules["java"] = java
platform = sys.platform
sys.platform = 'java1.5.0_13'
stdout, stderr = sys.stdout, sys.stderr
try:
# A mock sys.executable that uses a shebang line (this file)
exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
self.assertEqual(
get_script_header('#!/usr/local/bin/python', executable=exe),
'#!/usr/bin/env %s\n' % exe)
# Ensure we generate what is basically a broken shebang line
# when there's options, with a warning emitted
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python -x',
executable=exe),
'#!%s -x\n' % exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
finally:
del sys.modules["java"]
sys.platform = platform
sys.stdout, sys.stderr = stdout, stderr
class NamespaceTests(TestCase):
def setUp(self):
self._ns_pkgs = pkg_resources._namespace_packages.copy()
self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-")
os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
self._prev_sys_path = sys.path[:]
sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))
def tearDown(self):
shutil.rmtree(self._tmpdir)
pkg_resources._namespace_packages = self._ns_pkgs.copy()
sys.path = self._prev_sys_path[:]
def _assertIn(self, member, container):
""" assertIn and assertTrue does not exist in Python2.3"""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
            self.fail(standardMsg)
def test_two_levels_deep(self):
"""
Test nested namespace packages
Create namespace packages in the following tree :
site-packages-1/pkg1/pkg2
site-packages-2/pkg1/pkg2
Check both are in the _namespace_packages dict and that their __path__
is correct
"""
sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
for site in ["site-pkgs", "site-pkgs2"]:
pkg1_init = open(os.path.join(self._tmpdir, site,
"pkg1", "__init__.py"), "w")
pkg1_init.write(ns_str)
pkg1_init.close()
pkg2_init = open(os.path.join(self._tmpdir, site,
"pkg1", "pkg2", "__init__.py"), "w")
pkg2_init.write(ns_str)
pkg2_init.close()
import pkg1
self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
try:
import pkg1.pkg2
except ImportError:
self.fail("Setuptools tried to import the parent namespace package")
# check the _namespace_packages dict
self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
# check the __path__ attribute contains both paths
self.assertEqual(pkg1.pkg2.__path__, [
os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")])
| mit |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/sqlite3/test/userfunctions.py | 90 | 15023 | #-*- coding: iso-8859-1 -*-
# pysqlite2/test/userfunctions.py: tests for user-defined functions and
# aggregates.
#
# Copyright (C) 2005-2007 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
def func_returntext():
return "foo"
def func_returnunicode():
return "bar"
def func_returnint():
return 42
def func_returnfloat():
return 3.14
def func_returnnull():
return None
def func_returnblob():
return b"blob"
def func_returnlonglong():
return 1<<31
def func_raiseexception():
5/0
def func_isstring(v):
return type(v) is str
def func_isint(v):
return type(v) is int
def func_isfloat(v):
return type(v) is float
def func_isnone(v):
return type(v) is type(None)
def func_isblob(v):
return isinstance(v, (bytes, memoryview))
def func_islonglong(v):
return isinstance(v, int) and v >= 1<<31
class AggrNoStep:
def __init__(self):
pass
def finalize(self):
return 1
class AggrNoFinalize:
def __init__(self):
pass
def step(self, x):
pass
class AggrExceptionInInit:
def __init__(self):
5/0
def step(self, x):
pass
def finalize(self):
pass
class AggrExceptionInStep:
def __init__(self):
pass
def step(self, x):
5/0
def finalize(self):
return 42
class AggrExceptionInFinalize:
def __init__(self):
pass
def step(self, x):
pass
def finalize(self):
5/0
class AggrCheckType:
def __init__(self):
self.val = None
def step(self, whichType, val):
theType = {"str": str, "int": int, "float": float, "None": type(None),
"blob": bytes}
self.val = int(theType[whichType] is type(val))
def finalize(self):
return self.val
class AggrSum:
def __init__(self):
self.val = 0.0
def step(self, val):
self.val += val
def finalize(self):
return self.val
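# Hedged illustration (added, not part of the original test module): how an
# aggregate class such as AggrSum above is consumed through the sqlite3 API.
# The connection and table here are made up purely for the example; it relies
# on the module-level "import sqlite3 as sqlite" alias above.
def _demo_aggregate_usage():
    con = sqlite.connect(":memory:")
    con.create_aggregate("mysum", 1, AggrSum)  # SQL name, arg count, class
    con.execute("create table t(v)")
    con.executemany("insert into t(v) values (?)", [(1,), (2,), (3,)])
    # sqlite3 instantiates AggrSum, calls step() once per row and then
    # finalize() to produce the result:
    return con.execute("select mysum(v) from t").fetchone()[0]  # -> 6.0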
class FunctionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.create_function("returntext", 0, func_returntext)
self.con.create_function("returnunicode", 0, func_returnunicode)
self.con.create_function("returnint", 0, func_returnint)
self.con.create_function("returnfloat", 0, func_returnfloat)
self.con.create_function("returnnull", 0, func_returnnull)
self.con.create_function("returnblob", 0, func_returnblob)
self.con.create_function("returnlonglong", 0, func_returnlonglong)
self.con.create_function("raiseexception", 0, func_raiseexception)
self.con.create_function("isstring", 1, func_isstring)
self.con.create_function("isint", 1, func_isint)
self.con.create_function("isfloat", 1, func_isfloat)
self.con.create_function("isnone", 1, func_isnone)
self.con.create_function("isblob", 1, func_isblob)
self.con.create_function("islonglong", 1, func_islonglong)
def tearDown(self):
self.con.close()
def CheckFuncErrorOnCreate(self):
try:
self.con.create_function("bla", -100, lambda x: 2*x)
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
def CheckFuncRefCount(self):
def getfunc():
def f():
return 1
return f
f = getfunc()
globals()["foo"] = f
# self.con.create_function("reftest", 0, getfunc())
self.con.create_function("reftest", 0, f)
cur = self.con.cursor()
cur.execute("select reftest()")
def CheckFuncReturnText(self):
cur = self.con.cursor()
cur.execute("select returntext()")
val = cur.fetchone()[0]
self.assertEqual(type(val), str)
self.assertEqual(val, "foo")
def CheckFuncReturnUnicode(self):
cur = self.con.cursor()
cur.execute("select returnunicode()")
val = cur.fetchone()[0]
self.assertEqual(type(val), str)
self.assertEqual(val, "bar")
def CheckFuncReturnInt(self):
cur = self.con.cursor()
cur.execute("select returnint()")
val = cur.fetchone()[0]
self.assertEqual(type(val), int)
self.assertEqual(val, 42)
def CheckFuncReturnFloat(self):
cur = self.con.cursor()
cur.execute("select returnfloat()")
val = cur.fetchone()[0]
self.assertEqual(type(val), float)
if val < 3.139 or val > 3.141:
self.fail("wrong value")
def CheckFuncReturnNull(self):
cur = self.con.cursor()
cur.execute("select returnnull()")
val = cur.fetchone()[0]
self.assertEqual(type(val), type(None))
self.assertEqual(val, None)
def CheckFuncReturnBlob(self):
cur = self.con.cursor()
cur.execute("select returnblob()")
val = cur.fetchone()[0]
self.assertEqual(type(val), bytes)
self.assertEqual(val, b"blob")
def CheckFuncReturnLongLong(self):
cur = self.con.cursor()
cur.execute("select returnlonglong()")
val = cur.fetchone()[0]
self.assertEqual(val, 1<<31)
def CheckFuncException(self):
cur = self.con.cursor()
try:
cur.execute("select raiseexception()")
cur.fetchone()
self.fail("should have raised OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0], 'user-defined function raised exception')
def CheckParamString(self):
cur = self.con.cursor()
cur.execute("select isstring(?)", ("foo",))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamInt(self):
cur = self.con.cursor()
cur.execute("select isint(?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select isfloat(?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamNone(self):
cur = self.con.cursor()
cur.execute("select isnone(?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select isblob(?)", (memoryview(b"blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamLongLong(self):
cur = self.con.cursor()
cur.execute("select islonglong(?)", (1<<42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
class AggregateTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
cur = self.con.cursor()
cur.execute("""
create table test(
t text,
i integer,
f float,
n,
b blob
)
""")
cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
("foo", 5, 3.14, None, memoryview(b"blob"),))
self.con.create_aggregate("nostep", 1, AggrNoStep)
self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
self.con.create_aggregate("checkType", 2, AggrCheckType)
self.con.create_aggregate("mysum", 1, AggrSum)
def tearDown(self):
#self.cur.close()
#self.con.close()
pass
def CheckAggrErrorOnCreate(self):
try:
self.con.create_function("bla", -100, AggrSum)
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
def CheckAggrNoStep(self):
cur = self.con.cursor()
try:
cur.execute("select nostep(t) from test")
self.fail("should have raised an AttributeError")
except AttributeError as e:
self.assertEqual(e.args[0], "'AggrNoStep' object has no attribute 'step'")
def CheckAggrNoFinalize(self):
cur = self.con.cursor()
try:
cur.execute("select nofinalize(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
def CheckAggrExceptionInInit(self):
cur = self.con.cursor()
try:
cur.execute("select excInit(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0], "user-defined aggregate's '__init__' method raised error")
def CheckAggrExceptionInStep(self):
cur = self.con.cursor()
try:
cur.execute("select excStep(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0], "user-defined aggregate's 'step' method raised error")
def CheckAggrExceptionInFinalize(self):
cur = self.con.cursor()
try:
cur.execute("select excFinalize(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
def CheckAggrCheckParamStr(self):
cur = self.con.cursor()
cur.execute("select checkType('str', ?)", ("foo",))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamInt(self):
cur = self.con.cursor()
cur.execute("select checkType('int', ?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select checkType('float', ?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamNone(self):
cur = self.con.cursor()
cur.execute("select checkType('None', ?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select checkType('blob', ?)", (memoryview(b"blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckAggrSum(self):
cur = self.con.cursor()
cur.execute("delete from test")
cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
cur.execute("select mysum(i) from test")
val = cur.fetchone()[0]
self.assertEqual(val, 60)
class AuthorizerTests(unittest.TestCase):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return sqlite.SQLITE_DENY
if arg2 == 'c2' or arg1 == 't2':
return sqlite.SQLITE_DENY
return sqlite.SQLITE_OK
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.executescript("""
create table t1 (c1, c2);
create table t2 (c1, c2);
insert into t1 (c1, c2) values (1, 2);
insert into t2 (c1, c2) values (4, 5);
""")
# For our security test:
self.con.execute("select c2 from t2")
self.con.set_authorizer(self.authorizer_cb)
def tearDown(self):
pass
def test_table_access(self):
try:
self.con.execute("select * from t2")
except sqlite.DatabaseError as e:
if not e.args[0].endswith("prohibited"):
self.fail("wrong exception text: %s" % e.args[0])
return
self.fail("should have raised an exception due to missing privileges")
def test_column_access(self):
try:
self.con.execute("select c2 from t1")
except sqlite.DatabaseError as e:
if not e.args[0].endswith("prohibited"):
self.fail("wrong exception text: %s" % e.args[0])
return
self.fail("should have raised an exception due to missing privileges")
class AuthorizerRaiseExceptionTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
raise ValueError
if arg2 == 'c2' or arg1 == 't2':
raise ValueError
return sqlite.SQLITE_OK
class AuthorizerIllegalTypeTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return 0.0
if arg2 == 'c2' or arg1 == 't2':
return 0.0
return sqlite.SQLITE_OK
class AuthorizerLargeIntegerTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return 2**32
if arg2 == 'c2' or arg1 == 't2':
return 2**32
return sqlite.SQLITE_OK
def suite():
function_suite = unittest.makeSuite(FunctionTests, "Check")
aggregate_suite = unittest.makeSuite(AggregateTests, "Check")
authorizer_suite = unittest.makeSuite(AuthorizerTests)
return unittest.TestSuite((
function_suite,
aggregate_suite,
authorizer_suite,
unittest.makeSuite(AuthorizerRaiseExceptionTests),
unittest.makeSuite(AuthorizerIllegalTypeTests),
unittest.makeSuite(AuthorizerLargeIntegerTests),
))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| lgpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_gdbm.py | 105 | 2540 | import unittest
import os
from test.test_support import TESTFN, run_unittest, unlink, import_module
gdbm = import_module('gdbm')
filename = TESTFN
class TestGdbm(unittest.TestCase):
def setUp(self):
self.g = None
def tearDown(self):
if self.g is not None:
self.g.close()
unlink(filename)
def test_key_methods(self):
self.g = gdbm.open(filename, 'c')
self.assertEqual(self.g.keys(), [])
self.g['a'] = 'b'
self.g['12345678910'] = '019237410982340912840198242'
key_set = set(self.g.keys())
self.assertEqual(key_set, frozenset(['a', '12345678910']))
self.assertTrue(self.g.has_key('a'))
key = self.g.firstkey()
while key:
self.assertIn(key, key_set)
key_set.remove(key)
key = self.g.nextkey(key)
self.assertRaises(KeyError, lambda: self.g['xxx'])
def test_error_conditions(self):
# Try to open a non-existent database.
unlink(filename)
self.assertRaises(gdbm.error, gdbm.open, filename, 'r')
# Try to access a closed database.
self.g = gdbm.open(filename, 'c')
self.g.close()
self.assertRaises(gdbm.error, lambda: self.g['a'])
# Try to pass an invalid open flag.
self.assertRaises(gdbm.error, lambda: gdbm.open(filename, 'rx').close())
def test_flags(self):
# Test the flag parameter of open() by trying all supported flag modes.
all = set(gdbm.open_flags)
# Test standard flags (presumably "crwn").
modes = all - set('fsu')
for mode in sorted(modes):
self.g = gdbm.open(filename, mode)
self.g.close()
# Test additional flags (presumably "fsu").
flags = all - set('crwn')
for mode in modes:
for flag in flags:
self.g = gdbm.open(filename, mode + flag)
self.g.close()
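# For readers of the "presumably" comments above (added note, not from the
# original test): gdbm's documented base flags are 'r' (read only), 'w'
# (read/write), 'c' (read/write, creating the file if needed) and 'n' (always
# create a new, empty file), while 'f' (fast, unsynchronized writes), 's'
# (synchronized writes) and 'u' (do not lock the file) are optional modifiers
# -- which is why the test partitions gdbm.open_flags into 'crwn' and 'fsu'.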
def test_reorganize(self):
self.g = gdbm.open(filename, 'c')
size0 = os.path.getsize(filename)
self.g['x'] = 'x' * 10000
size1 = os.path.getsize(filename)
self.assertTrue(size0 < size1)
del self.g['x']
# 'size' is supposed to be the same even after deleting an entry.
self.assertEqual(os.path.getsize(filename), size1)
self.g.reorganize()
size2 = os.path.getsize(filename)
self.assertTrue(size1 > size2 >= size0)
def test_main():
run_unittest(TestGdbm)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/motion_detector_v2/motion_detector_v2.py | 1 | 7195 | # -*- coding: utf-8 -*-
"""
Motion Detector V2 Plugin
Copyright (C) 2017 Olaf Lüke <[email protected]>
motion_detector_v2.py: Motion Detector V2 Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import pyqtSignal, QTimer
from brickv.plugin_system.comcu_plugin_base import COMCUPluginBase
from brickv.bindings.bricklet_motion_detector_v2 import BrickletMotionDetectorV2
from brickv.plugin_system.plugins.motion_detector_v2.ui_motion_detector_v2 import Ui_MotionDetectorV2
from brickv.slider_spin_syncer import SliderSpinSyncer
from brickv.async_call import async_call
class MotionDetectorV2(COMCUPluginBase, Ui_MotionDetectorV2):
qtcb_motion_detected = pyqtSignal()
qtcb_detection_cycle_ended = pyqtSignal()
def __init__(self, *args):
COMCUPluginBase.__init__(self, BrickletMotionDetectorV2, *args)
self.motion_detector_v2 = self.device
self.changing = False
self.setupUi(self)
self.qtcb_motion_detected.connect(self.cb_motion_detected)
self.motion_detector_v2.register_callback(self.motion_detector_v2.CALLBACK_MOTION_DETECTED,
self.qtcb_motion_detected.emit)
self.qtcb_detection_cycle_ended.connect(self.cb_detection_cycle_ended)
self.motion_detector_v2.register_callback(self.motion_detector_v2.CALLBACK_DETECTION_CYCLE_ENDED,
self.qtcb_detection_cycle_ended.emit)
self.left_syncer = SliderSpinSyncer(self.slider_left, self.spin_left, self.indicator_changed, spin_signal='valueChanged')
self.right_syncer = SliderSpinSyncer(self.slider_right, self.spin_right, self.indicator_changed, spin_signal='valueChanged')
self.bottom_syncer = SliderSpinSyncer(self.slider_bottom, self.spin_bottom, self.indicator_changed, spin_signal='valueChanged')
self.all_syncer = SliderSpinSyncer(self.slider_all, self.spin_all, self.all_changed, spin_signal='valueChanged')
self.sensitivity_syncer = SliderSpinSyncer(self.slider_sensitivity, self.spin_sensitivity, self.sensitivity_changed, spin_signal='valueChanged')
def set_indicator(l, r, b):
self.changing = True
self.spin_left.setValue(l)
self.spin_right.setValue(r)
self.spin_bottom.setValue(b)
self.changing = False
self.indicator_changed()
self.button_off.clicked.connect(lambda: set_indicator(0, 0, 0))
self.button_on.clicked.connect(lambda: set_indicator(255, 255, 255))
self.button_left.clicked.connect(lambda: set_indicator(255, 0, 0))
self.button_right.clicked.connect(lambda: set_indicator(0, 255, 0))
self.button_bottom.clicked.connect(lambda: set_indicator(0, 0, 255))
self.indicator_update = False
self.indicator_value = [0, 0, 0]
self.sensitivity_update = False
self.sensitivity_value = 50
self.update_timer = QTimer(self)
self.update_timer.timeout.connect(self.update)
self.update_timer.setInterval(50)
self.update_timer.start()
def start(self):
async_call(self.motion_detector_v2.get_indicator, None, self.get_indicator_async, self.increase_error_count)
async_call(self.motion_detector_v2.get_motion_detected, None, self.get_motion_detected_async, self.increase_error_count)
async_call(self.motion_detector_v2.get_sensitivity, None, self.get_sensitivity_async, self.increase_error_count)
def stop(self):
pass
# Make sure that we update values with at most a 50ms interval
def update(self):
if self.indicator_update:
self.indicator_update = False
self.motion_detector_v2.set_indicator(*self.indicator_value)
if self.sensitivity_update:
self.sensitivity_update = False
self.motion_detector_v2.set_sensitivity(self.sensitivity_value)
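# The update() method above, driven by the 50 ms QTimer created in __init__,
# implements a "latest value wins" throttle: the setters only record the newest
# value and raise a dirty flag, and the timer flushes it to the device.  Below
# is a minimal, hardware-independent sketch of the same idea; the
# ThrottledSetter name and its callback are illustrative, not Bricklet API.
# QTimer is already imported at the top of this module.
class ThrottledSetter(QTimer):
    def __init__(self, apply_callback, interval_ms=50, parent=None):
        super().__init__(parent)
        self._apply = apply_callback  # receives the most recent pending value
        self._pending = None
        self._dirty = False
        self.timeout.connect(self._flush)
        self.start(interval_ms)

    def set_value(self, value):
        self._pending = value
        self._dirty = True  # picked up on the next timer tick

    def _flush(self):
        if self._dirty:
            self._dirty = False
            self._apply(self._pending)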
def sensitivity_changed(self, value):
self.sensitivity_value = value
self.sensitivity_update = True
def get_sensitivity_async(self, sensitivity):
self.spin_sensitivity.setValue(sensitivity)
def get_motion_detected_async(self, motion):
if motion == self.motion_detector_v2.MOTION_DETECTED:
self.cb_motion_detected()
elif motion == self.motion_detector_v2.MOTION_NOT_DETECTED:
self.cb_detection_cycle_ended()
def cb_motion_detected(self):
self.label_motion.setText("<font color='red'>Motion Detected</font>")
def cb_detection_cycle_ended(self):
self.label_motion.setText("No Motion Detected")
def indicator_changed(self, *_args):
if self.changing:
return
left, right, bottom = self.spin_left.value(), self.spin_right.value(), self.spin_bottom.value()
self.changing = True
self.spin_all.setValue((left+right+bottom)//3)
self.changing = False
self.indicator_value = [left, right, bottom]
self.indicator_update = True
self.label_color_left.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, left))
self.label_color_right.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, right))
self.label_color_bottom.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, bottom))
def all_changed(self, *_args):
if self.changing:
return
x = self.spin_all.value()
self.changing = True
self.spin_left.setValue(x)
self.spin_right.setValue(x)
self.spin_bottom.setValue(x)
self.changing = False
self.indicator_value = [x, x, x]
self.indicator_update = True
self.label_color_left.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, x))
self.label_color_right.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, x))
self.label_color_bottom.setStyleSheet('QLabel {{ background: #{:02x}{:02x}{:02x} }}'.format(0, 0, x))
def get_indicator_async(self, indicator):
self.changing = True
self.spin_left.setValue(indicator.top_left)
self.spin_right.setValue(indicator.top_right)
self.spin_bottom.setValue(indicator.bottom)
self.changing = False
self.indicator_changed()
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletMotionDetectorV2.DEVICE_IDENTIFIER
| gpl-2.0 |
xifle/home-assistant | homeassistant/components/sensor/apcupsd.py | 29 | 7186 | """
Provides a sensor to track various status aspects of a UPS.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.apcupsd/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.components import apcupsd
from homeassistant.const import (TEMP_CELSIUS, CONF_RESOURCES)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [apcupsd.DOMAIN]
SENSOR_PREFIX = 'UPS '
SENSOR_TYPES = {
'alarmdel': ['Alarm Delay', '', 'mdi:alarm'],
'ambtemp': ['Ambient Temperature', '', 'mdi:thermometer'],
'apc': ['Status Data', '', 'mdi:information-outline'],
'apcmodel': ['Model', '', 'mdi:information-outline'],
'badbatts': ['Bad Batteries', '', 'mdi:information-outline'],
'battdate': ['Battery Replaced', '', 'mdi:calendar-clock'],
'battstat': ['Battery Status', '', 'mdi:information-outline'],
'battv': ['Battery Voltage', 'V', 'mdi:flash'],
'bcharge': ['Battery', '%', 'mdi:battery'],
'cable': ['Cable Type', '', 'mdi:ethernet-cable'],
'cumonbatt': ['Total Time on Battery', '', 'mdi:timer'],
'date': ['Status Date', '', 'mdi:calendar-clock'],
'dipsw': ['Dip Switch Settings', '', 'mdi:information-outline'],
'dlowbatt': ['Low Battery Signal', '', 'mdi:clock-alert'],
'driver': ['Driver', '', 'mdi:information-outline'],
'dshutd': ['Shutdown Delay', '', 'mdi:timer'],
'dwake': ['Wake Delay', '', 'mdi:timer'],
'endapc': ['Date and Time', '', 'mdi:calendar-clock'],
'extbatts': ['External Batteries', '', 'mdi:information-outline'],
'firmware': ['Firmware Version', '', 'mdi:information-outline'],
'hitrans': ['Transfer High', 'V', 'mdi:flash'],
'hostname': ['Hostname', '', 'mdi:information-outline'],
'humidity': ['Ambient Humidity', '%', 'mdi:water-percent'],
'itemp': ['Internal Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'lastxfer': ['Last Transfer', '', 'mdi:transfer'],
'linefail': ['Input Voltage Status', '', 'mdi:information-outline'],
'linefreq': ['Line Frequency', 'Hz', 'mdi:information-outline'],
'linev': ['Input Voltage', 'V', 'mdi:flash'],
'loadpct': ['Load', '%', 'mdi:gauge'],
'lotrans': ['Transfer Low', 'V', 'mdi:flash'],
'mandate': ['Manufacture Date', '', 'mdi:calendar'],
'masterupd': ['Master Update', '', 'mdi:information-outline'],
'maxlinev': ['Input Voltage High', 'V', 'mdi:flash'],
'maxtime': ['Battery Timeout', '', 'mdi:timer-off'],
'mbattchg': ['Battery Shutdown', '%', 'mdi:battery-alert'],
'minlinev': ['Input Voltage Low', 'V', 'mdi:flash'],
'mintimel': ['Shutdown Time', '', 'mdi:timer'],
'model': ['Model', '', 'mdi:information-outline'],
'nombattv': ['Battery Nominal Voltage', 'V', 'mdi:flash'],
'nominv': ['Nominal Input Voltage', 'V', 'mdi:flash'],
'nomoutv': ['Nominal Output Voltage', 'V', 'mdi:flash'],
'nompower': ['Nominal Output Power', 'W', 'mdi:flash'],
'numxfers': ['Transfer Count', '', 'mdi:counter'],
'outputv': ['Output Voltage', 'V', 'mdi:flash'],
'reg1': ['Register 1 Fault', '', 'mdi:information-outline'],
'reg2': ['Register 2 Fault', '', 'mdi:information-outline'],
'reg3': ['Register 3 Fault', '', 'mdi:information-outline'],
'retpct': ['Restore Requirement', '%', 'mdi:battery-alert'],
'selftest': ['Last Self Test', '', 'mdi:calendar-clock'],
'sense': ['Sensitivity', '', 'mdi:information-outline'],
'serialno': ['Serial Number', '', 'mdi:information-outline'],
'starttime': ['Startup Time', '', 'mdi:calendar-clock'],
'statflag': ['Status Flag', '', 'mdi:information-outline'],
'status': ['Status', '', 'mdi:information-outline'],
'stesti': ['Self Test Interval', '', 'mdi:information-outline'],
'timeleft': ['Time Left', '', 'mdi:clock-alert'],
'tonbatt': ['Time on Battery', '', 'mdi:timer'],
'upsmode': ['Mode', '', 'mdi:information-outline'],
'upsname': ['Name', '', 'mdi:information-outline'],
'version': ['Daemon Info', '', 'mdi:information-outline'],
'xoffbat': ['Transfer from Battery', '', 'mdi:transfer'],
'xoffbatt': ['Transfer from Battery', '', 'mdi:transfer'],
'xonbatt': ['Transfer to Battery', '', 'mdi:transfer'],
}
SPECIFIC_UNITS = {
'ITEMP': TEMP_CELSIUS
}
INFERRED_UNITS = {
' Minutes': 'min',
' Seconds': 'sec',
' Percent': '%',
' Volts': 'V',
' Watts': 'W',
' Hz': 'Hz',
' C': TEMP_CELSIUS,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCES, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the APCUPSd sensors."""
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in SENSOR_TYPES:
SENSOR_TYPES[sensor_type] = [
sensor_type.title(), '', 'mdi:information-outline']
if sensor_type.upper() not in apcupsd.DATA.status:
_LOGGER.warning(
'Sensor type: "%s" does not appear in the APCUPSd status '
'output', sensor_type)
entities.append(APCUPSdSensor(apcupsd.DATA, sensor_type))
add_entities(entities)
def infer_unit(value):
"""If the value ends with any of the units from ALL_UNITS, split the unit
off the end of the value and return the (value, unit) tuple pair.
Otherwise return the original value and None as the unit.
"""
from apcaccess.status import ALL_UNITS
for unit in ALL_UNITS:
if value.endswith(unit):
return value[:-len(unit)], INFERRED_UNITS.get(unit, unit.strip())
return value, None
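# Hedged illustration (added, not part of the original platform code): assuming
# apcaccess's ALL_UNITS contains ' Volts' and ' Percent' (true for current
# apcaccess releases, but an assumption here), infer_unit() behaves like this:
def _demo_infer_unit():
    assert infer_unit('230.0 Volts') == ('230.0', 'V')
    assert infer_unit('42.0 Percent') == ('42.0', '%')
    # Values without a recognised suffix pass through with no unit:
    assert infer_unit('ONLINE') == ('ONLINE', None)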
class APCUPSdSensor(Entity):
"""Representation of a sensor entity for APCUPSd status values."""
def __init__(self, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = SENSOR_PREFIX + SENSOR_TYPES[sensor_type][0]
self._unit = SENSOR_TYPES[sensor_type][1]
self._inferred_unit = None
self.update()
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the current value reported by APCUPSd for this sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if not self._unit:
return self._inferred_unit
return self._unit
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self.type.upper() not in self._data.status:
self._state = None
self._inferred_unit = None
else:
self._state, self._inferred_unit = infer_unit(
self._data.status[self.type.upper()])
| mit |
GertBurger/pygame_cffi | test/test_utils/__init__.py | 18 | 5834 | #################################### IMPORTS ###################################
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import unittest
else:
from test.test_utils import unittest
import tempfile, sys, pygame, time, os
################################################################################
# Python 3.x compatibility
try:
xrange_ = xrange
except NameError:
xrange_ = range
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
def geterror():
return sys.exc_info()[1]
################################################################################
this_dir = os.path.dirname(os.path.abspath(__file__))
trunk_dir = os.path.split(os.path.split(this_dir)[0])[0]
if is_pygame_pkg:
test_module = 'tests'
else:
test_module = 'test'
def trunk_relative_path(relative):
return os.path.normpath(os.path.join(trunk_dir, relative))
def fixture_path(path):
return trunk_relative_path(os.path.join(test_module, 'fixtures', path))
def example_path(path):
return trunk_relative_path(os.path.join('examples', path))
sys.path.insert(0, trunk_relative_path('.'))
############################### INCOMPLETE TESTS ###############################
# TODO: PHASE THIS OUT
# Just prefix TODO test names with todo_.
# eg def todo_test_sanity__is_overrated(self): self.fail()
# Change test loader to load test_ and todo_ TestCase callables as tests
fail_incomplete_tests = 0
def test_not_implemented():
return not fail_incomplete_tests
################################## TEMP FILES ##################################
def get_tmp_dir():
return tempfile.mkdtemp()
################################################################################
def question(q):
return raw_input_('%s ' % q.rstrip(' ')).lower().strip() == 'y'
def prompt(p):
return raw_input_('%s (and press enter to continue) ' % p.rstrip(' '))
#################################### HELPERS ###################################
def rgba_between(value, minimum=0, maximum=255):
if value < minimum: return minimum
elif value > maximum: return maximum
else: return value
def combinations(seqs):
"""
Recipe 496807 from ActiveState Python CookBook
Non recursive technique for getting all possible combinations of a sequence
of sequences.
"""
r=[[]]
for x in seqs:
r = [ i + [y] for y in x for i in r ]
return r
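# Worked example (added note, not from the original helpers): combinations()
# builds the Cartesian product of its input sequences, e.g.
#     combinations([[0, 1], ['a', 'b']])
#     -> [[0, 'a'], [1, 'a'], [0, 'b'], [1, 'b']]
# i.e. one list per possible way of picking a single element from each input.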
def gradient(width, height):
"""
Yields a pt and corresponding RGBA tuple, for every (width, height) combo.
Useful for generating gradients.
Actual gradient may be changed, no tests rely on specific values.
Used in transform.rotate lossless tests to generate a fixture.
"""
for l in xrange_(width):
for t in xrange_(height):
yield (l,t), tuple(map(rgba_between, (l, t, l, l+t)))
def unordered_equality(seq1, seq2):
"""
Tests to see if the contents of one sequence are contained in the other
and that the two sequences are of the same length.
"""
if len(seq1) != len(seq2):
return False
# if isinstance(seq1, dict) and isinstance(seq2, dict):
# seq1 = seq1.items()
# seq2 = seq2.items()
for val in seq1:
if val not in seq2:
return False
return True
def rect_area_pts(rect):
for l in xrange_(rect.left, rect.right):
for t in xrange_(rect.top, rect.bottom):
yield l, t
def rect_perimeter_pts(rect):
"""
Returns pts ((L, T) tuples) encompassing the perimeter of a rect.
The order is clockwise:
topleft to topright
topright to bottomright
bottomright to bottomleft
bottomleft to topleft
Duplicate pts are not returned
"""
clock_wise_from_top_left = (
[(l, rect.top) for l in xrange_(rect.left, rect.right) ],
[(rect.right -1, t) for t in xrange_(rect.top + 1, rect.bottom) ],
[(l, rect.bottom -1) for l in xrange_(rect.right -2, rect.left -1, -1)],
[(rect.left, t) for t in xrange_(rect.bottom -2, rect.top, -1)]
)
for line in clock_wise_from_top_left:
for pt in line: yield pt
def rect_outer_bounds(rect):
"""
Returns topleft outerbound if possible and then the other pts, that are
"exclusive" bounds of the rect
   ?------O
    |RECT|      ?|0)uterbound
    |----|
   O      O
"""
return (
(rect.left != 0 and [(rect.left-1, rect.top)] or []) +
[ rect.topright,
rect.bottomleft,
rect.bottomright]
)
def import_submodule(module):
m = __import__(module)
for n in module.split('.')[1:]:
m = getattr(m, n)
return m
def test():
"""
Lightweight test for helpers
"""
r = pygame.Rect(0, 0, 10, 10)
assert (
rect_outer_bounds ( r ) == [(10, 0), # tr
( 0, 10), # bl
(10, 10)] # br
)
assert len(list(rect_area_pts(r))) == 100
r = pygame.Rect(0, 0, 3, 3)
assert list(rect_perimeter_pts(r)) == [
(0, 0), (1, 0), (2, 0), # tl -> tr
(2, 1), (2, 2), # tr -> br
(1, 2), (0, 2), # br -> bl
(0, 1) # bl -> tl
]
if is_pygame_pkg:
module = 'pygame.tests.test_utils.unittest'
else:
module = 'test.test_utils.unittest'
assert import_submodule(module) is unittest
print ('Tests: OK')
################################################################################
| lgpl-2.1 |
gmatteo/pymatgen | pymatgen/io/feff/tests/test_sets.py | 5 | 11111 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import shutil
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifFile, CifParser
from pymatgen.io.feff.inputs import Atoms, Header, Potential, Tags
from pymatgen.io.feff.sets import FEFFDictSet, MPELNESSet, MPEXAFSSet, MPXANESSet
from pymatgen.util.testing import PymatgenTest
class FeffInputSetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.333333 0.666667 0.503676
* 2 Co 0.666667 0.333333 0.003676
* 3 O 0.333333 0.666667 0.121324
* 4 O 0.666667 0.333333 0.621325"""
cif_file = os.path.join(PymatgenTest.TEST_FILES_DIR, "CoO19128.cif")
cls.structure = CifParser(cif_file).get_structures()[0]
cls.absorbing_atom = "O"
cls.mp_xanes = MPXANESSet(cls.absorbing_atom, cls.structure)
def test_get_header(self):
comment = "From cif file"
header = str(self.mp_xanes.header(source="CoO19128.cif", comment=comment))
print(header)
ref = self.header_string.splitlines()
last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
for i, l in enumerate(header.splitlines()):
if i < 9:
self.assertEqual(l, ref[i])
else:
s = " ".join(l.split()[2:])
self.assertIn(s, last4)
def test_getfefftags(self):
tags = self.mp_xanes.tags.as_dict()
self.assertEqual(tags["COREHOLE"], "FSR", "Failed to generate PARAMETERS string")
def test_get_feffPot(self):
POT = str(self.mp_xanes.potential)
d, dr = Potential.pot_dict_from_string(POT)
self.assertEqual(d["Co"], 1, "Wrong symbols read in for Potential")
def test_get_feff_atoms(self):
atoms = str(self.mp_xanes.atoms)
self.assertEqual(
atoms.splitlines()[3].split()[4],
self.absorbing_atom,
"failed to create ATOMS string",
)
def test_to_and_from_dict(self):
f1_dict = self.mp_xanes.as_dict()
f2 = MPXANESSet.from_dict(f1_dict)
self.assertEqual(f1_dict, f2.as_dict())
def test_user_tag_settings(self):
tags_dict_ans = self.mp_xanes.tags.as_dict()
tags_dict_ans["COREHOLE"] = "RPA"
tags_dict_ans["EDGE"] = "L1"
user_tag_settings = {"COREHOLE": "RPA", "EDGE": "L1"}
mp_xanes_2 = MPXANESSet(self.absorbing_atom, self.structure, user_tag_settings=user_tag_settings)
self.assertEqual(mp_xanes_2.tags.as_dict(), tags_dict_ans)
def test_eels_to_from_dict(self):
elnes = MPELNESSet(
self.absorbing_atom,
self.structure,
radius=5.0,
beam_energy=100,
beam_direction=[1, 0, 0],
collection_angle=7,
convergence_angle=6,
)
elnes_dict = elnes.as_dict()
elnes_2 = MPELNESSet.from_dict(elnes_dict)
self.assertEqual(elnes_dict, elnes_2.as_dict())
def test_eels_tags_set(self):
radius = 5.0
user_eels_settings = {
"ENERGY": "4 0.04 0.1",
"BEAM_ENERGY": "200 1 0 1",
"ANGLES": "2 3",
}
elnes = MPELNESSet(
self.absorbing_atom,
self.structure,
radius=radius,
user_eels_settings=user_eels_settings,
)
elnes_2 = MPELNESSet(
self.absorbing_atom,
self.structure,
radius=radius,
beam_energy=100,
beam_direction=[1, 0, 0],
collection_angle=7,
convergence_angle=6,
)
self.assertEqual(elnes.tags["ELNES"]["ENERGY"], user_eels_settings["ENERGY"])
self.assertEqual(elnes.tags["ELNES"]["BEAM_ENERGY"], user_eels_settings["BEAM_ENERGY"])
self.assertEqual(elnes.tags["ELNES"]["ANGLES"], user_eels_settings["ANGLES"])
self.assertEqual(elnes_2.tags["ELNES"]["BEAM_ENERGY"], [100, 0, 1, 1])
self.assertEqual(elnes_2.tags["ELNES"]["BEAM_DIRECTION"], [1, 0, 0])
self.assertEqual(elnes_2.tags["ELNES"]["ANGLES"], [7, 6])
def test_reciprocal_tags_and_input(self):
user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
elnes = MPELNESSet(self.absorbing_atom, self.structure, user_tag_settings=user_tag_settings)
self.assertTrue("RECIPROCAL" in elnes.tags)
self.assertEqual(elnes.tags["TARGET"], 3)
self.assertEqual(elnes.tags["KMESH"], "1000")
self.assertEqual(elnes.tags["CIF"], "Co2O2.cif")
self.assertEqual(elnes.tags["COREHOLE"], "RPA")
all_input = elnes.all_input()
self.assertNotIn("ATOMS", all_input)
self.assertNotIn("POTENTIALS", all_input)
elnes.write_input()
structure = Structure.from_file("Co2O2.cif")
self.assertTrue(self.structure.matches(structure))
os.remove("HEADER")
os.remove("PARAMETERS")
os.remove("feff.inp")
os.remove("Co2O2.cif")
def test_small_system_EXAFS(self):
exafs_settings = MPEXAFSSet(self.absorbing_atom, self.structure)
self.assertFalse(exafs_settings.small_system)
self.assertTrue("RECIPROCAL" not in exafs_settings.tags)
user_tag_settings = {"RECIPROCAL": ""}
exafs_settings_2 = MPEXAFSSet(
self.absorbing_atom,
self.structure,
nkpts=1000,
user_tag_settings=user_tag_settings,
)
self.assertFalse(exafs_settings_2.small_system)
self.assertTrue("RECIPROCAL" not in exafs_settings_2.tags)
def test_number_of_kpoints(self):
user_tag_settings = {"RECIPROCAL": ""}
elnes = MPELNESSet(
self.absorbing_atom,
self.structure,
nkpts=1000,
user_tag_settings=user_tag_settings,
)
self.assertEqual(elnes.tags["KMESH"], [12, 12, 7])
def test_large_systems(self):
struct = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "La4Fe4O12.cif"))
user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
elnes = MPELNESSet("Fe", struct, user_tag_settings=user_tag_settings)
self.assertNotIn("RECIPROCAL", elnes.tags)
self.assertNotIn("KMESH", elnes.tags)
self.assertNotIn("CIF", elnes.tags)
self.assertNotIn("TARGET", elnes.tags)
def test_postfeffset(self):
self.mp_xanes.write_input(os.path.join(".", "xanes_3"))
feff_dict_input = FEFFDictSet.from_directory(os.path.join(".", "xanes_3"))
self.assertTrue(feff_dict_input.tags == Tags.from_file(os.path.join(".", "xanes_3/feff.inp")))
self.assertTrue(str(feff_dict_input.header()) == str(Header.from_file(os.path.join(".", "xanes_3/HEADER"))))
feff_dict_input.write_input("xanes_3_regen")
origin_tags = Tags.from_file(os.path.join(".", "xanes_3/PARAMETERS"))
output_tags = Tags.from_file(os.path.join(".", "xanes_3_regen/PARAMETERS"))
origin_mole = Atoms.cluster_from_file(os.path.join(".", "xanes_3/feff.inp"))
output_mole = Atoms.cluster_from_file(os.path.join(".", "xanes_3_regen/feff.inp"))
original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
original_mole_shell = [x.species_string for x in origin_mole]
output_mole_shell = [x.species_string for x in output_mole]
self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
self.assertTrue(origin_tags == output_tags)
self.assertTrue(original_mole_shell == output_mole_shell)
shutil.rmtree(os.path.join(".", "xanes_3"))
shutil.rmtree(os.path.join(".", "xanes_3_regen"))
reci_mp_xanes = MPXANESSet(self.absorbing_atom, self.structure, user_tag_settings={"RECIPROCAL": ""})
reci_mp_xanes.write_input("xanes_reci")
feff_reci_input = FEFFDictSet.from_directory(os.path.join(".", "xanes_reci"))
self.assertTrue("RECIPROCAL" in feff_reci_input.tags)
feff_reci_input.write_input("Dup_reci")
self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "HEADER")))
self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "feff.inp")))
self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "PARAMETERS")))
self.assertFalse(os.path.exists(os.path.join(".", "Dup_reci", "ATOMS")))
self.assertFalse(os.path.exists(os.path.join(".", "Dup_reci", "POTENTIALS")))
tags_original = Tags.from_file(os.path.join(".", "xanes_reci/feff.inp"))
tags_output = Tags.from_file(os.path.join(".", "Dup_reci/feff.inp"))
self.assertTrue(tags_original == tags_output)
stru_orig = Structure.from_file(os.path.join(".", "xanes_reci/Co2O2.cif"))
stru_reci = Structure.from_file(os.path.join(".", "Dup_reci/Co2O2.cif"))
self.assertTrue(stru_orig.__eq__(stru_reci))
shutil.rmtree(os.path.join(".", "Dup_reci"))
shutil.rmtree(os.path.join(".", "xanes_reci"))
def test_post_distdiff(self):
feff_dict_input = FEFFDictSet.from_directory(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test"))
self.assertTrue(
feff_dict_input.tags == Tags.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/feff.inp"))
)
self.assertTrue(
str(feff_dict_input.header())
== str(Header.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/HEADER")))
)
feff_dict_input.write_input("feff_dist_regen")
origin_tags = Tags.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/PARAMETERS"))
output_tags = Tags.from_file(os.path.join(".", "feff_dist_regen/PARAMETERS"))
origin_mole = Atoms.cluster_from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/feff.inp"))
output_mole = Atoms.cluster_from_file(os.path.join(".", "feff_dist_regen/feff.inp"))
original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
original_mole_shell = [x.species_string for x in origin_mole]
output_mole_shell = [x.species_string for x in output_mole]
self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
self.assertTrue(origin_tags == output_tags)
self.assertTrue(original_mole_shell == output_mole_shell)
shutil.rmtree(os.path.join(".", "feff_dist_regen"))
if __name__ == "__main__":
unittest.main()
| mit |
tinfoil/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_input.py | 125 | 2612 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None):
# TestInput objects are normally constructed by the manager and passed
# to the workers, but some of these fields are set lazily in the workers where possible
# because they require us to look at the filesystem and we want to be able to do that in parallel.
self.test_name = test_name
self.timeout = timeout # in msecs; should rename this for consistency
self.requires_lock = requires_lock
self.reference_files = reference_files
self.should_run_pixel_tests = should_run_pixel_tests
def __repr__(self):
return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests)
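# Hedged usage sketch (added, not from the WebKit sources): the manager builds
# the cheap fields up front and a worker later fills in the ones that require
# filesystem access, e.g.
#     test_input = TestInput('fast/css/example.html', timeout=6000)
#     test_input.should_run_pixel_tests = True   # decided in the worker
# ('fast/css/example.html' is an illustrative test name, not a real fixture.)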
| bsd-3-clause |
elover/python-django-blog | myblog/pygments/styles/murphy.py | 364 | 2751 | # -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
"""
Murphy's style from CodeRay.
"""
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "#666 italic",
Comment.Preproc: "#579 noitalic",
Comment.Special: "#c00 bold",
Keyword: "bold #289",
Keyword.Pseudo: "#08f",
Keyword.Type: "#66f",
Operator: "#333",
Operator.Word: "bold #000",
Name.Builtin: "#072",
Name.Function: "bold #5ed",
Name.Class: "bold #e9e",
Name.Namespace: "bold #0e84b5",
Name.Exception: "bold #F00",
Name.Variable: "#036",
Name.Variable.Instance: "#aaf",
Name.Variable.Class: "#ccf",
Name.Variable.Global: "#f84",
Name.Constant: "bold #5ed",
Name.Label: "bold #970",
Name.Entity: "#800",
Name.Attribute: "#007",
Name.Tag: "#070",
Name.Decorator: "bold #555",
String: "bg:#e0e0ff",
String.Char: "#88F bg:",
String.Doc: "#D42 bg:",
String.Interpol: "bg:#eee",
String.Escape: "bold #666",
String.Regex: "bg:#e0e0ff #000",
String.Symbol: "#fc8 bg:",
String.Other: "#f88",
Number: "bold #60E",
Number.Integer: "bold #66f",
Number.Float: "bold #60E",
Number.Hex: "bold #058",
Number.Oct: "bold #40E",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#F00 bg:#FAA"
}
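# Hedged usage note (added, not part of the original style definition): a style
# class like this is normally consumed through a formatter, e.g.
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     highlight('print(1 + 1)', PythonLexer(), HtmlFormatter(style='murphy'))
# where the 'murphy' name resolves to this class via Pygments' style registry.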
| mit |
strk/QGIS | python/plugins/processing/algs/qgis/GeometryConvert.py | 4 | 9956 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Gridify.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
from qgis.core import (QgsFeature,
QgsGeometry,
QgsMultiPoint,
QgsMultiLineString,
QgsLineString,
QgsPolygon,
QgsFeatureSink,
QgsWkbTypes,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class GeometryConvert(QgisAlgorithm):
INPUT = 'INPUT'
TYPE = 'TYPE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.types = [self.tr('Centroids'),
self.tr('Nodes'),
self.tr('Linestrings'),
self.tr('Multilinestrings'),
self.tr('Polygons')]
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterEnum(self.TYPE,
self.tr('New geometry type'), options=self.types))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
self.tr('Converted')))
def name(self):
return 'convertgeometrytype'
def displayName(self):
return self.tr('Convert geometry type')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
index = self.parameterAsEnum(parameters, self.TYPE, context)
if index == 0:
newType = QgsWkbTypes.Point
elif index == 1:
newType = QgsWkbTypes.Point
if QgsWkbTypes.hasM(source.wkbType()):
newType = QgsWkbTypes.addM(newType)
if QgsWkbTypes.hasZ(source.wkbType()):
newType = QgsWkbTypes.addZ(newType)
elif index == 2:
newType = QgsWkbTypes.LineString
if QgsWkbTypes.hasM(source.wkbType()):
newType = QgsWkbTypes.addM(newType)
if QgsWkbTypes.hasZ(source.wkbType()):
newType = QgsWkbTypes.addZ(newType)
elif index == 3:
newType = QgsWkbTypes.MultiLineString
if QgsWkbTypes.hasM(source.wkbType()):
newType = QgsWkbTypes.addM(newType)
if QgsWkbTypes.hasZ(source.wkbType()):
newType = QgsWkbTypes.addZ(newType)
else:
newType = QgsWkbTypes.Polygon
if QgsWkbTypes.hasM(source.wkbType()):
newType = QgsWkbTypes.addM(newType)
if QgsWkbTypes.hasZ(source.wkbType()):
newType = QgsWkbTypes.addZ(newType)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
source.fields(), newType, source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
features = source.getFeatures()
total = 100.0 / source.featureCount() if source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
if not f.hasGeometry():
sink.addFeature(f, QgsFeatureSink.FastInsert)
else:
for p in self.convertGeometry(f.geometry(), index):
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(p)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
def convertGeometry(self, geom, target_type):
# returns an array of output geometries for the input geometry
if target_type == 0:
#centroid
return self.convertToCentroid(geom)
elif target_type == 1:
#nodes
return self.convertToNodes(geom)
elif target_type == 2:
#linestrings
return self.convertToLineStrings(geom)
elif target_type == 3:
#multilinestrings
return self.convertToMultiLineStrings(geom)
elif target_type == 4:
#polygon
return self.convertToPolygon(geom)
def convertToCentroid(self, geom):
return [geom.centroid()]
def convertToNodes(self, geom):
mp = QgsMultiPoint()
# TODO: mega inefficient - needs rework when geometry iterators land
# (but at least it doesn't lose Z/M values)
for g in geom.constGet().coordinateSequence():
for r in g:
for p in r:
mp.addGeometry(p)
return [QgsGeometry(mp)]
def convertToLineStrings(self, geom):
if QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.PointGeometry:
raise QgsProcessingException(
self.tr('Cannot convert from {0} to LineStrings').format(QgsWkbTypes.displayString(geom.wkbType())))
elif QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.LineGeometry:
if QgsWkbTypes.isMultiType(geom.wkbType()):
return geom.asGeometryCollection()
else:
#line to line
return [geom]
else:
# polygons to lines
# we just use the boundary here - that consists of all rings in the (multi)polygon
boundary = QgsGeometry(geom.constGet().boundary())
# boundary will be multipart
return boundary.asGeometryCollection()
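# Added note (not from the original source): following OGC semantics, the
# boundary of a (multi)polygon is a MultiLineString with one closed ring per
# exterior or interior ring, so a polygon with one hole yields two LineString
# parts here and asGeometryCollection() returns them as separate geometries.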
def convertToMultiLineStrings(self, geom):
if QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.PointGeometry:
raise QgsProcessingException(
self.tr('Cannot convert from {0} to MultiLineStrings').format(QgsWkbTypes.displayString(geom.wkbType())))
elif QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.LineGeometry:
if QgsWkbTypes.isMultiType(geom.wkbType()):
return [geom]
else:
# line to multiLine
ml = QgsMultiLineString()
ml.addGeometry(geom.constGet().clone())
return [QgsGeometry(ml)]
else:
# polygons to multilinestring
# we just use the boundary here - that consists of all rings in the (multi)polygon
return [QgsGeometry(geom.constGet().boundary())]
def convertToPolygon(self, geom):
if QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.PointGeometry and geom.constGet().nCoordinates() < 3:
raise QgsProcessingException(
self.tr('Cannot convert from {0} to Polygon').format(QgsWkbTypes.displayString(geom.wkbType())))
elif QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.PointGeometry:
# multipoint with at least 3 points
# TODO: mega inefficient - needs rework when geometry iterators land
# (but at least it doesn't lose Z/M values)
points = []
for g in geom.constGet().coordinateSequence():
for r in g:
for p in r:
points.append(p)
linestring = QgsLineString(points)
linestring.close()
p = QgsPolygon()
p.setExteriorRing(linestring)
return [QgsGeometry(p)]
elif QgsWkbTypes.geometryType(geom.wkbType()) == QgsWkbTypes.LineGeometry:
if QgsWkbTypes.isMultiType(geom.wkbType()):
parts = []
for i in range(geom.constGet().numGeometries()):
p = QgsPolygon()
linestring = geom.constGet().geometryN(i).clone()
linestring.close()
p.setExteriorRing(linestring)
parts.append(QgsGeometry(p))
return QgsGeometry.collectGeometry(parts)
else:
# linestring to polygon
p = QgsPolygon()
linestring = geom.constGet().clone()
linestring.close()
p.setExteriorRing(linestring)
return [QgsGeometry(p)]
else:
#polygon
if QgsWkbTypes.isMultiType(geom.wkbType()):
return geom.asGeometryCollection()
else:
return [geom]
| gpl-2.0 |
masamichi/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/utils/hmac.py | 403 | 3286 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
(This file is modified from the standard library version to do faster
copying)
"""
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod is None:
import md5
digestmod = md5
if key is None: #TREVNEW - for faster copying
return #TREVNEW
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
self.digest_size = digestmod.digest_size
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC(None) #TREVNEW - for faster copying
other.digest_size = self.digest_size #TREVNEW
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([hex(ord(x))[2:].zfill(2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
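# Hedged usage example (added, not part of the original module); it follows the
# PEP 247 flow described in new()'s docstring and relies on the legacy Python 2
# 'md5' module that __init__ falls back to when no digestmod is given.
def _example_usage():
    h = new("secret-key", "message part 1")
    h.update("message part 2")   # more data can be fed in at any time
    return h.hexdigest()         # hex HMAC-MD5 of the key over both parts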
| apache-2.0 |
Anonymouslemming/ansible | test/units/modules/network/vyos/test_vyos_system.py | 113 | 3870 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.vyos import vyos_system
from .vyos_module import TestVyosModule, load_fixture, set_module_args
class TestVyosSystemModule(TestVyosModule):
module = vyos_system
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.vyos.vyos_system.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.vyos.vyos_system.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('vyos_config_config.cfg')
def test_vyos_system_hostname(self):
set_module_args(dict(host_name='foo'))
commands = ["set system host-name 'foo'"]
self.execute_module(changed=True, commands=commands)
def test_vyos_system_clear_hostname(self):
set_module_args(dict(host_name='foo', state='absent'))
commands = ["delete system host-name"]
self.execute_module(changed=True, commands=commands)
def test_vyos_remove_single_name_server(self):
set_module_args(dict(name_server=['8.8.4.4'], state='absent'))
commands = ["delete system name-server '8.8.4.4'"]
self.execute_module(changed=True, commands=commands)
def test_vyos_system_domain_name(self):
set_module_args(dict(domain_name='example2.com'))
commands = ["set system domain-name 'example2.com'"]
self.execute_module(changed=True, commands=commands)
def test_vyos_system_clear_domain_name(self):
set_module_args(dict(domain_name='example.com', state='absent'))
commands = ['delete system domain-name']
self.execute_module(changed=True, commands=commands)
def test_vyos_system_domain_search(self):
set_module_args(dict(domain_search=['foo.example.com', 'bar.example.com']))
commands = ["set system domain-search domain 'foo.example.com'",
"set system domain-search domain 'bar.example.com'"]
self.execute_module(changed=True, commands=commands)
def test_vyos_system_clear_domain_search(self):
set_module_args(dict(domain_search=[]))
commands = ['delete system domain-search domain']
self.execute_module(changed=True, commands=commands)
def test_vyos_system_no_change(self):
set_module_args(dict(host_name='router', domain_name='example.com', name_server=['8.8.8.8', '8.8.4.4']))
result = self.execute_module()
self.assertEqual([], result['commands'])
def test_vyos_system_clear_all(self):
set_module_args(dict(state='absent'))
commands = ['delete system host-name',
'delete system domain-search domain',
'delete system domain-name',
'delete system name-server']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
dendisuhubdy/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py | 14 | 2451 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SigmoidBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation."""
def testBijector(self):
with self.test_session():
self.assertEqual("sigmoid", Sigmoid().name)
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
ildj = -np.log(y) - np.log1p(-y)
bijector = Sigmoid()
self.assertAllClose(y, bijector.forward(x).eval(), atol=0., rtol=1e-2)
self.assertAllClose(x, bijector.inverse(y).eval(), atol=0., rtol=1e-4)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval(), atol=0., rtol=1e-6)
self.assertAllClose(-ildj, bijector.forward_log_det_jacobian(
x, event_ndims=0).eval(), atol=0., rtol=1e-4)
def testScalarCongruency(self):
with self.test_session():
assert_scalar_congruency(Sigmoid(), lower_x=-7., upper_x=7.)
def testBijectiveAndFinite(self):
with self.test_session():
x = np.linspace(-7., 7., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
assert_bijective_and_finite(
Sigmoid(), x, y, event_ndims=0, atol=0., rtol=1e-4)
if __name__ == "__main__":
test.main()
| apache-2.0 |
haradashinya/flask-admin | flask_admin/contrib/sqla/tools.py | 7 | 6906 | import types
from sqlalchemy import tuple_, or_, and_, inspect
from sqlalchemy.ext.declarative.clsregistry import _class_resolver
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.associationproxy import ASSOCIATION_PROXY
from sqlalchemy.sql.operators import eq
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm.attributes import InstrumentedAttribute
from flask_admin._compat import filter_list, string_types
from flask_admin.tools import iterencode, iterdecode, escape # noqa: F401
def parse_like_term(term):
if term.startswith('^'):
stmt = '%s%%' % term[1:]
elif term.startswith('='):
stmt = term[1:]
else:
stmt = '%%%s%%' % term
return stmt
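# Hedged illustration (editor addition) of the three prefix forms handled above:
#   parse_like_term('^cam') -> 'cam%'   (starts-with)
#   parse_like_term('=cam') -> 'cam'    (exact value)
#   parse_like_term('cam')  -> '%cam%'  (contains)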
def filter_foreign_columns(base_table, columns):
"""
Return list of columns that belong to passed table.
:param base_table: Table to check against
:param columns: List of columns to filter
"""
return filter_list(lambda c: c.table == base_table, columns)
def get_primary_key(model):
"""
Return primary key name from a model. If the primary key consists of multiple columns,
return the corresponding tuple
:param model:
Model class
"""
mapper = model._sa_class_manager.mapper
pks = [mapper.get_property_by_column(c).key for c in mapper.primary_key]
if len(pks) == 1:
return pks[0]
elif len(pks) > 1:
return tuple(pks)
else:
return None
def has_multiple_pks(model):
"""
Return True, if the model has more than one primary key
"""
if not hasattr(model, '_sa_class_manager'):
raise TypeError('model must be a sqlalchemy mapped model')
return len(model._sa_class_manager.mapper.primary_key) > 1
def tuple_operator_in(model_pk, ids):
"""The tuple_ Operator only works on certain engines like MySQL or Postgresql. It does not work with sqlite.
The function returns an or_ operator that contains and_ operators for every single tuple in ids.
Example::
model_pk = [ColumnA, ColumnB]
ids = ((1,2), (1,3))
tuple_operator(model_pk, ids) -> or_( and_( ColumnA == 1, ColumnB == 2), and_( ColumnA == 1, ColumnB == 3) )
The returned operator can be used within a filter(), as it is just an or_ operator
"""
l = []
for id in ids:
k = []
for i in range(len(model_pk)):
k.append(eq(model_pk[i], id[i]))
l.append(and_(*k))
if len(l) >= 1:
return or_(*l)
else:
return None
def get_query_for_ids(modelquery, model, ids):
"""
Return a query object filtered by primary key values passed in `ids` argument.
Unfortunately, it is not possible to use `in_` filter if model has more than one
primary key.
"""
if has_multiple_pks(model):
# Decode keys to tuples
decoded_ids = [iterdecode(v) for v in ids]
# Get model primary key property references
model_pk = [getattr(model, name) for name in get_primary_key(model)]
try:
query = modelquery.filter(tuple_(*model_pk).in_(decoded_ids))
# Only the execution of the query will tell us, if the tuple_
# operator really works
query.all()
except DBAPIError:
query = modelquery.filter(tuple_operator_in(model_pk, decoded_ids))
else:
model_pk = getattr(model, get_primary_key(model))
query = modelquery.filter(model_pk.in_(ids))
return query
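# Hedged usage sketch (editor addition): narrowing a query to specific primary
# key values with the helper above. ``User`` and ``session`` are illustrative
# assumptions, not part of this module.
#
#     query = get_query_for_ids(session.query(User), User, ['1', '7'])
#     users = query.all()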
def get_columns_for_field(field):
if (not field or
not hasattr(field, 'property') or
not hasattr(field.property, 'columns') or
not field.property.columns):
raise Exception('Invalid field %s: does not contain any columns.' % field)
return field.property.columns
def need_join(model, table):
"""
Check if join to a table is necessary.
"""
return table not in model._sa_class_manager.mapper.tables
def get_field_with_path(model, name, return_remote_proxy_attr=True):
"""
Resolve property by name and figure out its join path.
Join path might contain both properties and tables.
"""
path = []
# For strings, resolve path
if isinstance(name, string_types):
# create a copy to keep original model as `model`
current_model = model
value = None
for attribute in name.split('.'):
value = getattr(current_model, attribute)
if is_association_proxy(value):
relation_values = value.attr
if return_remote_proxy_attr:
value = value.remote_attr
else:
relation_values = [value]
for relation_value in relation_values:
if is_relationship(relation_value):
current_model = relation_value.property.mapper.class_
table = current_model.__table__
if need_join(model, table):
path.append(relation_value)
attr = value
else:
attr = name
# Determine joins if table.column (relation object) is provided
if isinstance(attr, InstrumentedAttribute) or is_association_proxy(attr):
columns = get_columns_for_field(attr)
if len(columns) > 1:
raise Exception('Can only handle one column for %s' % name)
column = columns[0]
# TODO: Use SQLAlchemy "path-finder" to find exact join path to the target property
if need_join(model, column.table):
path.append(column.table)
return attr, path
# copied from sqlalchemy-utils
def get_hybrid_properties(model):
return dict(
(key, prop)
for key, prop in inspect(model).all_orm_descriptors.items()
if isinstance(prop, hybrid_property)
)
def is_hybrid_property(model, attr_name):
if isinstance(attr_name, string_types):
names = attr_name.split('.')
last_model = model
for i in range(len(names) - 1):
attr = getattr(last_model, names[i])
if is_association_proxy(attr):
attr = attr.remote_attr
last_model = attr.property.argument
if isinstance(last_model, _class_resolver):
last_model = model._decl_class_registry[last_model.arg]
elif isinstance(last_model, types.FunctionType):
last_model = last_model()
last_name = names[-1]
return last_name in get_hybrid_properties(last_model)
else:
return attr_name.name in get_hybrid_properties(model)
def is_relationship(attr):
return hasattr(attr, 'property') and hasattr(attr.property, 'direction')
def is_association_proxy(attr):
return hasattr(attr, 'extension_type') and attr.extension_type == ASSOCIATION_PROXY
| bsd-3-clause |
311labs/SRL | radmin/python/config.py | 1 | 6036 | """
module: radmin.config
Config file logic
Created by Ian Starnes 2007-11-27
"""
class Config(object):
"""The Config class."""
def __init__(self, filename):
self.filename = filename
self._items = {}
execfile(filename, globals(), self._items)
def add(self, key, value):
self._items[key] = value
def has_key(self, key):
return self._items.has_key(key)
def get(self, field, default=None, required=False):
"returns the field as a string"
if self._items.has_key(field):
return self._items[field]
if required:
raise Exception("missing required field(%s)!" % field)
return default
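# Hedged usage sketch (editor addition): ``Config`` executes a Python file and
# exposes its top-level names through ``get``. The path and key names below
# are illustrative assumptions.
#
#     cfg = Config("/etc/radmin/settings.py")
#     host = cfg.get("host", default="localhost")
#     port = cfg.get("port", required=True)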
class IniFile(object):
"""docstring for IniFile"""
SYNTAX_COMMENTS = ['#', ';']
SYNTAX_DELIMITER = '='
def __init__(self, filename):
self.filename = filename
self.sections = []
self._section_dict = {}
def hasSection(self, section):
return section in self.sections
def getSection(self, section):
if section not in self.sections:
self._section_dict[section] = {}
self.sections.append(section)
return self._section_dict[section]
def load(self):
f = open(self.filename, 'r')
section = self.getSection("")
quote = None
key = None
value = None
for line in f.readlines():
# if inside a quote read anything until end quote
if quote != None:
new_value = line.strip()
if new_value[-1] == quote:
value += new_value[:-1]
quote = None
section[key] = value
continue
value += new_value
# ignore comments or empty lines
if line[0] in IniFile.SYNTAX_COMMENTS or len(line) < 3:
continue
# check for new section
if line[0] == '[':
section = self.getSection(line[1:line.find("]")])
continue
# check for delimiter
if line.count(IniFile.SYNTAX_DELIMITER) >= 1:
#key, value = line.split('=')
pos = line.find(IniFile.SYNTAX_DELIMITER)
key = line[:pos].strip()
value = line[pos+len(IniFile.SYNTAX_DELIMITER):].strip()
if value[0] == '"':
quote = '"'
elif value[0] == "'":
quote = "'"
# parse the string
if quote != None:
if value[-1] == quote:
value = value[1:-1]
quote = None
else:
value = value[1:]
continue
else:
if value.count('.') == 1:
flt = value.split('.')
if flt[0].isdigit() and flt[1].isdigit():
value = float(value)
elif value.isdigit():
value = int(value)
section[key] = value
else:
print "no delimiter found in line: %s" % line
def _write(self, writer, comment=None):
f = writer
if comment:
f.write("# %s\n\n" % self.comment)
for name in self.sections:
section = self._section_dict[name]
if len(section) > 0:
f.write("[%s]\n" % name)
sorted_keys = section.keys()
sorted_keys.sort()
for key in sorted_keys:
value = section[key]
if type(value) is str:
if value.count('\n') > 0 or value.count('"') > 0:
f.write('%s="""%s"""\n\n' % (key, value))
else:
f.write('%s="%s"\n' % (key, value))
else:
f.write('%s=%s\n' % (key, value))
f.write("\n")
def save(self, comment=None):
f = open(self.filename, 'w')
self._write(f, comment)
f.close()
def __str__(self):
import StringIO
writer = StringIO.StringIO()
self._write(writer)
out = writer.getvalue()
writer.close()
return out
def asHTML(self):
"""generate html output"""
import StringIO, cgi
f = StringIO.StringIO()
f.write("<div class='ini'>\n")
for name in self.sections:
section = self._section_dict[name]
if len(section) > 0:
error_summary = False
if name.lower().count("error") > 0:
error_summary = True
f.write("<div class='ini_section'>\n")
count = 0
f.write("<h3>%s</h3>\n" % name)
if error_summary:
f.write("<div class='ini_errors'>\n")
sorted_keys = section.keys()
sorted_keys.sort()
for key in sorted_keys:
value = section[key]
if not error_summary:
f.write("<span class='ini_key'>%s:</span> <span class='ini_value'>%s</span>\n" %(key, value))
else:
f.write("<span class='ini_error'>%s</span>\n" % cgi.escape(value))
count += 1
if count == 3 or error_summary:
f.write("<br />\n")
count = 0
if error_summary:
f.write("</div>\n")
f.write("</div>\n")
f.write("</div>\n")
content = f.getvalue()
f.close()
return content
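# Hedged usage sketch (editor addition): round-tripping an INI file with the
# parser above. The file name, section, and key are illustrative assumptions.
#
#     ini = IniFile("settings.ini")
#     ini.load()
#     section = ini.getSection("general")
#     section["timeout"] = 30
#     ini.save(comment="written by radmin")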
| mit |
harej/wikiproject_scripts | reportsbot/config.py | 2 | 2143 | # -*- coding: utf-8 -*-
import errno
from os import path
import yaml
from .exceptions import ConfigError
__all__ = ["Config"]
class Config:
"""Stores general-purpose bot configuration."""
def __init__(self, base_dir):
self._base_dir = base_dir
self._data = {}
self._load()
def _load(self):
"""Load or reload the bot's main configuration file (config.yml)."""
filename = path.join(self._base_dir, "config.yml")
try:
with open(filename) as fp:
self._data = yaml.full_load(fp)
except (OSError, yaml.error.YAMLError) as exc:
if getattr(exc, "errno", None) == errno.ENOENT:  # Ignore missing file; use defaults (YAMLError has no errno)
return
err = "Couldn't read config file ({}):\n{}"
raise ConfigError(err.format(filename, exc)) from None
def _get_sql_info(self, which):
"""Get some SQL connection info."""
sql = self._data.get("sql", {})
info = sql.get("all", {}).copy()
info.update(sql.get(which, {}))
return info
@property
def dir(self):
"""Return the bot's config directory."""
return self._base_dir
@property
def username(self):
"""Return the bot's username."""
return self._data.get("username")
@property
def default_project(self):
"""Return the default site project, like 'wikipedia'."""
return self._data.get("defaults", {}).get("project", "wikipedia")
@property
def default_lang(self):
"""Return the default site language, like 'en'."""
return self._data.get("defaults", {}).get("lang", "en")
def get_wiki_sql(self, site):
"""Return SQL connection info for the wiki DB for the given site."""
info = self._get_sql_info("wiki")
for key, val in info.items(): # Convert db="{site}_p" to "enwiki_p"
if isinstance(val, str):
info[key] = val.format(site=site)
return info
def get_local_sql(self):
"""Return SQL connection info for the local Reports bot/WPX DB."""
return self._get_sql_info("local")
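# Hedged usage sketch (editor addition): with a config.yml along the lines of
#   sql:
#     all:  {host: localhost, user: bot}
#     wiki: {db: "{site}_p"}
# ``Config(base_dir).get_wiki_sql("enwiki")`` would return
# {'host': 'localhost', 'user': 'bot', 'db': 'enwiki_p'}. The directory,
# keys, and site name are illustrative assumptions.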
| mit |
DemocracyLab/CivicTechExchange | common/management/commands/project_external_updates.py | 1 | 4929 | from django.core.management.base import BaseCommand
from common.helpers.db import bulk_delete
from common.helpers.github import fetch_github_info, get_owner_repo_name_from_public_url, \
get_repo_endpoint_from_owner_repo_name, get_repo_names_from_owner_repo_name, get_branch_name_from_public_url
from common.helpers.date_helpers import datetime_field_to_datetime
from django.conf import settings
import pytz
import traceback
class Command(BaseCommand):
def handle(self, *args, **options):
project_github_links = get_project_github_links()
for github_link in project_github_links:
try:
if github_link.link_project.is_searchable:
handle_project_github_updates(github_link)
except:
# Keep processing if we run into errors with a particular update
print('Error processing Github Link: ' + github_link.link_url)
print(traceback.format_exc())
pass
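# Hedged usage note (editor addition): as a Django management command this
# module is normally run as ``python manage.py project_external_updates``
# (the command name is derived from the file name under management/commands/).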
def get_project_github_links():
from civictechprojects.models import ProjectLink
return ProjectLink.objects.filter(link_url__icontains='github.com/').exclude(link_project__isnull=True)
def handle_project_github_updates(project_github_link):
project = project_github_link.link_project
print('Handling updates for project {id} github link: {url}'.format(id=project.id, url=project_github_link.link_url))
last_updated_time = datetime_field_to_datetime(get_project_latest_commit_date(project_github_link.link_project))
owner_repo_name = get_owner_repo_name_from_public_url(project_github_link.link_url)
branch_name = get_branch_name_from_public_url(project_github_link.link_url)
repo_names = get_repo_names_from_owner_repo_name(owner_repo_name)
raw_commits = []
for repo_name in repo_names:
repo_url = get_repo_endpoint_from_owner_repo_name(repo_name, last_updated_time, branch_name)
print('Ingesting: ' + repo_url)
repo_info = fetch_github_info(repo_url)
if repo_info is not None and len(repo_info) > 0:
repo_display_name = repo_name[0] + '/' + repo_name[1]
raw_commits = raw_commits + list(map(lambda commit: [repo_display_name, commit, branch_name], repo_info))
if len(raw_commits) > 0:
# Take the most recent top X commits
raw_commits.sort(key=lambda commit: commit[1]['commit']['author']['date'], reverse=True)
raw_commits = raw_commits[:settings.MAX_COMMITS_PER_PROJECT]
add_commits_to_database(project, raw_commits)
latest_commit_date = raw_commits[0][1]['commit']['author']['date']
update_if_commit_after_project_updated_time(project, latest_commit_date)
remove_old_commits(project)
def update_if_commit_after_project_updated_time(project, latest_commit_date_string):
project_updated_time = datetime_field_to_datetime(project.project_date_modified)
latest_commit_time = datetime_field_to_datetime(latest_commit_date_string)
# Need to add timezone info to time from github
latest_commit_time = pytz.timezone("UTC").localize(latest_commit_time)
if project_updated_time < latest_commit_time:
print('Updating project {id} to latest timestamp: {time}'.format(id=project.id, time=latest_commit_date_string))
project.update_timestamp(latest_commit_time)
project.recache()
else:
print('Did not update project {id} because last commit at {commit_time} before project updated time {project_update}'.format(
id=project.id,
commit_time=latest_commit_date_string,
project_update=project_updated_time))
def add_commits_to_database(project, commits_to_ingest):
from civictechprojects.models import ProjectCommit
for commit_info in commits_to_ingest:
branch = commit_info[2] if commit_info[2] is not None else 'master'
display_name = commit_info[0]
commit = commit_info[1]
ProjectCommit.create(project, display_name, branch, commit)
project.recache()
def get_project_latest_commit_date(project):
from civictechprojects.models import ProjectCommit
latest_commit = ProjectCommit.objects.filter(commit_project=project.id).order_by('-commit_date').first()
return latest_commit and latest_commit.commit_date
def remove_old_commits(project):
from civictechprojects.models import ProjectCommit
# Get the number of commits to delete
commit_count = ProjectCommit.objects.filter(commit_project=project.id).count()
# Delete them
if commit_count > settings.MAX_COMMITS_PER_PROJECT:
print('Deleting {ct} commits from project {id}'.format(ct=commit_count - settings.MAX_COMMITS_PER_PROJECT, id=project.id))
commits_to_remove = ProjectCommit.objects.filter(commit_project=project.id)\
.order_by('-commit_date')[settings.MAX_COMMITS_PER_PROJECT:]
bulk_delete(ProjectCommit, commits_to_remove) | mit |
rodrigolucianocosta/ControleEstoque | rOne/Storage101/django-localflavor/django-localflavor-1.3/tests/test_fi.py | 10 | 16297 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.fi.forms import FIMunicipalitySelect, FISocialSecurityNumber, FIZipCodeField
class FILocalFlavorTests(SimpleTestCase):
def test_FIMunicipalitySelect(self):
f = FIMunicipalitySelect()
out = '''<select name="municipalities">
<option value="akaa">Akaa</option>
<option value="alajarvi">Alaj\xe4rvi</option>
<option value="alavieska">Alavieska</option>
<option value="alavus">Alavus</option>
<option value="artjarvi">Artj\xe4rvi</option>
<option value="asikkala">Asikkala</option>
<option value="askola">Askola</option>
<option value="aura">Aura</option>
<option value="brando">Br\xe4nd\xf6</option>
<option value="eckero">Ecker\xf6</option>
<option value="enonkoski">Enonkoski</option>
<option value="enontekio">Enonteki\xf6</option>
<option value="espoo">Espoo</option>
<option value="eura">Eura</option>
<option value="eurajoki">Eurajoki</option>
<option value="evijarvi">Evij\xe4rvi</option>
<option value="finstrom">Finstr\xf6m</option>
<option value="forssa">Forssa</option>
<option value="foglo">F\xf6gl\xf6</option>
<option value="geta">Geta</option>
<option value="haapajarvi">Haapaj\xe4rvi</option>
<option value="haapavesi">Haapavesi</option>
<option value="hailuoto">Hailuoto</option>
<option value="halsua">Halsua</option>
<option value="hamina">Hamina</option>
<option value="hammarland">Hammarland</option>
<option value="hankasalmi">Hankasalmi</option>
<option value="hanko">Hanko</option>
<option value="harjavalta">Harjavalta</option>
<option value="hartola">Hartola</option>
<option value="hattula">Hattula</option>
<option value="haukipudas">Haukipudas</option>
<option value="hausjarvi">Hausj\xe4rvi</option>
<option value="heinola">Heinola</option>
<option value="heinavesi">Hein\xe4vesi</option>
<option value="helsinki">Helsinki</option>
<option value="hirvensalmi">Hirvensalmi</option>
<option value="hollola">Hollola</option>
<option value="honkajoki">Honkajoki</option>
<option value="huittinen">Huittinen</option>
<option value="humppila">Humppila</option>
<option value="hyrynsalmi">Hyrynsalmi</option>
<option value="hyvinkaa">Hyvink\xe4\xe4</option>
<option value="hameenkoski">H\xe4meenkoski</option>
<option value="hameenkyro">H\xe4meenkyr\xf6</option>
<option value="hameenlinna">H\xe4meenlinna</option>
<option value="ii">Ii</option>
<option value="iisalmi">Iisalmi</option>
<option value="iitti">Iitti</option>
<option value="ikaalinen">Ikaalinen</option>
<option value="ilmajoki">Ilmajoki</option>
<option value="ilomantsi">Ilomantsi</option>
<option value="imatra">Imatra</option>
<option value="inari">Inari</option>
<option value="inkoo">Inkoo</option>
<option value="isojoki">Isojoki</option>
<option value="isokyro">Isokyr\xf6</option>
<option value="jalasjarvi">Jalasj\xe4rvi</option>
<option value="janakkala">Janakkala</option>
<option value="joensuu">Joensuu</option>
<option value="jokioinen">Jokioinen</option>
<option value="jomala">Jomala</option>
<option value="joroinen">Joroinen</option>
<option value="joutsa">Joutsa</option>
<option value="juankoski">Juankoski</option>
<option value="juuka">Juuka</option>
<option value="juupajoki">Juupajoki</option>
<option value="juva">Juva</option>
<option value="jyvaskyla">Jyv\xe4skyl\xe4</option>
<option value="jamijarvi">J\xe4mij\xe4rvi</option>
<option value="jamsa">J\xe4ms\xe4</option>
<option value="jarvenpaa">J\xe4rvenp\xe4\xe4</option>
<option value="kaarina">Kaarina</option>
<option value="kaavi">Kaavi</option>
<option value="kajaani">Kajaani</option>
<option value="kalajoki">Kalajoki</option>
<option value="kangasala">Kangasala</option>
<option value="kangasniemi">Kangasniemi</option>
<option value="kankaanpaa">Kankaanp\xe4\xe4</option>
<option value="kannonkoski">Kannonkoski</option>
<option value="kannus">Kannus</option>
<option value="karijoki">Karijoki</option>
<option value="karjalohja">Karjalohja</option>
<option value="karkkila">Karkkila</option>
<option value="karstula">Karstula</option>
<option value="karttula">Karttula</option>
<option value="karvia">Karvia</option>
<option value="kaskinen">Kaskinen</option>
<option value="kauhajoki">Kauhajoki</option>
<option value="kauhava">Kauhava</option>
<option value="kauniainen">Kauniainen</option>
<option value="kaustinen">Kaustinen</option>
<option value="keitele">Keitele</option>
<option value="kemi">Kemi</option>
<option value="kemijarvi">Kemij\xe4rvi</option>
<option value="keminmaa">Keminmaa</option>
<option value="kemionsaari">Kemi\xf6nsaari</option>
<option value="kempele">Kempele</option>
<option value="kerava">Kerava</option>
<option value="kerimaki">Kerim\xe4ki</option>
<option value="kesalahti">Kes\xe4lahti</option>
<option value="keuruu">Keuruu</option>
<option value="kihnio">Kihni\xf6</option>
<option value="kiikoinen">Kiikoinen</option>
<option value="kiiminki">Kiiminki</option>
<option value="kinnula">Kinnula</option>
<option value="kirkkonummi">Kirkkonummi</option>
<option value="kitee">Kitee</option>
<option value="kittila">Kittil\xe4</option>
<option value="kiuruvesi">Kiuruvesi</option>
<option value="kivijarvi">Kivij\xe4rvi</option>
<option value="kokemaki">Kokem\xe4ki</option>
<option value="kokkola">Kokkola</option>
<option value="kolari">Kolari</option>
<option value="konnevesi">Konnevesi</option>
<option value="kontiolahti">Kontiolahti</option>
<option value="korsnas">Korsn\xe4s</option>
<option value="koskitl">Koski Tl</option>
<option value="kotka">Kotka</option>
<option value="kouvola">Kouvola</option>
<option value="kristiinankaupunki">Kristiinankaupunki</option>
<option value="kruunupyy">Kruunupyy</option>
<option value="kuhmalahti">Kuhmalahti</option>
<option value="kuhmo">Kuhmo</option>
<option value="kuhmoinen">Kuhmoinen</option>
<option value="kumlinge">Kumlinge</option>
<option value="kuopio">Kuopio</option>
<option value="kuortane">Kuortane</option>
<option value="kurikka">Kurikka</option>
<option value="kustavi">Kustavi</option>
<option value="kuusamo">Kuusamo</option>
<option value="kylmakoski">Kylm\xe4koski</option>
<option value="kyyjarvi">Kyyj\xe4rvi</option>
<option value="karkola">K\xe4rk\xf6l\xe4</option>
<option value="karsamaki">K\xe4rs\xe4m\xe4ki</option>
<option value="kokar">K\xf6kar</option>
<option value="koylio">K\xf6yli\xf6</option>
<option value="lahti">Lahti</option>
<option value="laihia">Laihia</option>
<option value="laitila">Laitila</option>
<option value="lapinjarvi">Lapinj\xe4rvi</option>
<option value="lapinlahti">Lapinlahti</option>
<option value="lappajarvi">Lappaj\xe4rvi</option>
<option value="lappeenranta">Lappeenranta</option>
<option value="lapua">Lapua</option>
<option value="laukaa">Laukaa</option>
<option value="lavia">Lavia</option>
<option value="lemi">Lemi</option>
<option value="lemland">Lemland</option>
<option value="lempaala">Lemp\xe4\xe4l\xe4</option>
<option value="leppavirta">Lepp\xe4virta</option>
<option value="lestijarvi">Lestij\xe4rvi</option>
<option value="lieksa">Lieksa</option>
<option value="lieto">Lieto</option>
<option value="liminka">Liminka</option>
<option value="liperi">Liperi</option>
<option value="lohja">Lohja</option>
<option value="loimaa">Loimaa</option>
<option value="loppi">Loppi</option>
<option value="loviisa">Loviisa</option>
<option value="luhanka">Luhanka</option>
<option value="lumijoki">Lumijoki</option>
<option value="lumparland">Lumparland</option>
<option value="luoto">Luoto</option>
<option value="luumaki">Luum\xe4ki</option>
<option value="luvia">Luvia</option>
<option value="lansi-turunmaa">L\xe4nsi-Turunmaa</option>
<option value="maalahti">Maalahti</option>
<option value="maaninka">Maaninka</option>
<option value="maarianhamina">Maarianhamina</option>
<option value="marttila">Marttila</option>
<option value="masku">Masku</option>
<option value="merijarvi">Merij\xe4rvi</option>
<option value="merikarvia">Merikarvia</option>
<option value="miehikkala">Miehikk\xe4l\xe4</option>
<option value="mikkeli">Mikkeli</option>
<option value="muhos">Muhos</option>
<option value="multia">Multia</option>
<option value="muonio">Muonio</option>
<option value="mustasaari">Mustasaari</option>
<option value="muurame">Muurame</option>
<option value="mynamaki">Myn\xe4m\xe4ki</option>
<option value="myrskyla">Myrskyl\xe4</option>
<option value="mantsala">M\xe4nts\xe4l\xe4</option>
<option value="mantta-vilppula">M\xe4ntt\xe4-Vilppula</option>
<option value="mantyharju">M\xe4ntyharju</option>
<option value="naantali">Naantali</option>
<option value="nakkila">Nakkila</option>
<option value="nastola">Nastola</option>
<option value="nilsia">Nilsi\xe4</option>
<option value="nivala">Nivala</option>
<option value="nokia">Nokia</option>
<option value="nousiainen">Nousiainen</option>
<option value="nummi-pusula">Nummi-Pusula</option>
<option value="nurmes">Nurmes</option>
<option value="nurmijarvi">Nurmij\xe4rvi</option>
<option value="narpio">N\xe4rpi\xf6</option>
<option value="oravainen">Oravainen</option>
<option value="orimattila">Orimattila</option>
<option value="oripaa">Orip\xe4\xe4</option>
<option value="orivesi">Orivesi</option>
<option value="oulainen">Oulainen</option>
<option value="oulu">Oulu</option>
<option value="oulunsalo">Oulunsalo</option>
<option value="outokumpu">Outokumpu</option>
<option value="padasjoki">Padasjoki</option>
<option value="paimio">Paimio</option>
<option value="paltamo">Paltamo</option>
<option value="parikkala">Parikkala</option>
<option value="parkano">Parkano</option>
<option value="pedersore">Peders\xf6re</option>
<option value="pelkosenniemi">Pelkosenniemi</option>
<option value="pello">Pello</option>
<option value="perho">Perho</option>
<option value="pertunmaa">Pertunmaa</option>
<option value="petajavesi">Pet\xe4j\xe4vesi</option>
<option value="pieksamaki">Pieks\xe4m\xe4ki</option>
<option value="pielavesi">Pielavesi</option>
<option value="pietarsaari">Pietarsaari</option>
<option value="pihtipudas">Pihtipudas</option>
<option value="pirkkala">Pirkkala</option>
<option value="polvijarvi">Polvij\xe4rvi</option>
<option value="pomarkku">Pomarkku</option>
<option value="pori">Pori</option>
<option value="pornainen">Pornainen</option>
<option value="porvoo">Porvoo</option>
<option value="posio">Posio</option>
<option value="pudasjarvi">Pudasj\xe4rvi</option>
<option value="pukkila">Pukkila</option>
<option value="punkaharju">Punkaharju</option>
<option value="punkalaidun">Punkalaidun</option>
<option value="puolanka">Puolanka</option>
<option value="puumala">Puumala</option>
<option value="pyhtaa">Pyht\xe4\xe4</option>
<option value="pyhajoki">Pyh\xe4joki</option>
<option value="pyhajarvi">Pyh\xe4j\xe4rvi</option>
<option value="pyhanta">Pyh\xe4nt\xe4</option>
<option value="pyharanta">Pyh\xe4ranta</option>
<option value="palkane">P\xe4lk\xe4ne</option>
<option value="poytya">P\xf6yty\xe4</option>
<option value="raahe">Raahe</option>
<option value="raasepori">Raasepori</option>
<option value="raisio">Raisio</option>
<option value="rantasalmi">Rantasalmi</option>
<option value="ranua">Ranua</option>
<option value="rauma">Rauma</option>
<option value="rautalampi">Rautalampi</option>
<option value="rautavaara">Rautavaara</option>
<option value="rautjarvi">Rautj\xe4rvi</option>
<option value="reisjarvi">Reisj\xe4rvi</option>
<option value="riihimaki">Riihim\xe4ki</option>
<option value="ristiina">Ristiina</option>
<option value="ristijarvi">Ristij\xe4rvi</option>
<option value="rovaniemi">Rovaniemi</option>
<option value="ruokolahti">Ruokolahti</option>
<option value="ruovesi">Ruovesi</option>
<option value="rusko">Rusko</option>
<option value="raakkyla">R\xe4\xe4kkyl\xe4</option>
<option value="saarijarvi">Saarij\xe4rvi</option>
<option value="salla">Salla</option>
<option value="salo">Salo</option>
<option value="saltvik">Saltvik</option>
<option value="sastamala">Sastamala</option>
<option value="sauvo">Sauvo</option>
<option value="savitaipale">Savitaipale</option>
<option value="savonlinna">Savonlinna</option>
<option value="savukoski">Savukoski</option>
<option value="seinajoki">Sein\xe4joki</option>
<option value="sievi">Sievi</option>
<option value="siikainen">Siikainen</option>
<option value="siikajoki">Siikajoki</option>
<option value="siikalatva">Siikalatva</option>
<option value="siilinjarvi">Siilinj\xe4rvi</option>
<option value="simo">Simo</option>
<option value="sipoo">Sipoo</option>
<option value="siuntio">Siuntio</option>
<option value="sodankyla">Sodankyl\xe4</option>
<option value="soini">Soini</option>
<option value="somero">Somero</option>
<option value="sonkajarvi">Sonkaj\xe4rvi</option>
<option value="sotkamo">Sotkamo</option>
<option value="sottunga">Sottunga</option>
<option value="sulkava">Sulkava</option>
<option value="sund">Sund</option>
<option value="suomenniemi">Suomenniemi</option>
<option value="suomussalmi">Suomussalmi</option>
<option value="suonenjoki">Suonenjoki</option>
<option value="sysma">Sysm\xe4</option>
<option value="sakyla">S\xe4kyl\xe4</option>
<option value="taipalsaari">Taipalsaari</option>
<option value="taivalkoski">Taivalkoski</option>
<option value="taivassalo">Taivassalo</option>
<option value="tammela">Tammela</option>
<option value="tampere">Tampere</option>
<option value="tarvasjoki">Tarvasjoki</option>
<option value="tervo">Tervo</option>
<option value="tervola">Tervola</option>
<option value="teuva">Teuva</option>
<option value="tohmajarvi">Tohmaj\xe4rvi</option>
<option value="toholampi">Toholampi</option>
<option value="toivakka">Toivakka</option>
<option value="tornio">Tornio</option>
<option value="turku" selected="selected">Turku</option>
<option value="tuusniemi">Tuusniemi</option>
<option value="tuusula">Tuusula</option>
<option value="tyrnava">Tyrn\xe4v\xe4</option>
<option value="toysa">T\xf6ys\xe4</option>
<option value="ulvila">Ulvila</option>
<option value="urjala">Urjala</option>
<option value="utajarvi">Utaj\xe4rvi</option>
<option value="utsjoki">Utsjoki</option>
<option value="uurainen">Uurainen</option>
<option value="uusikaarlepyy">Uusikaarlepyy</option>
<option value="uusikaupunki">Uusikaupunki</option>
<option value="vaala">Vaala</option>
<option value="vaasa">Vaasa</option>
<option value="valkeakoski">Valkeakoski</option>
<option value="valtimo">Valtimo</option>
<option value="vantaa">Vantaa</option>
<option value="varkaus">Varkaus</option>
<option value="varpaisjarvi">Varpaisj\xe4rvi</option>
<option value="vehmaa">Vehmaa</option>
<option value="vesanto">Vesanto</option>
<option value="vesilahti">Vesilahti</option>
<option value="veteli">Veteli</option>
<option value="vierema">Vierem\xe4</option>
<option value="vihanti">Vihanti</option>
<option value="vihti">Vihti</option>
<option value="viitasaari">Viitasaari</option>
<option value="vimpeli">Vimpeli</option>
<option value="virolahti">Virolahti</option>
<option value="virrat">Virrat</option>
<option value="vardo">V\xe5rd\xf6</option>
<option value="vahakyro">V\xe4h\xe4kyr\xf6</option>
<option value="voyri-maksamaa">V\xf6yri-Maksamaa</option>
<option value="yli-ii">Yli-Ii</option>
<option value="ylitornio">Ylitornio</option>
<option value="ylivieska">Ylivieska</option>
<option value="ylojarvi">Yl\xf6j\xe4rvi</option>
<option value="ypaja">Yp\xe4j\xe4</option>
<option value="ahtari">\xc4ht\xe4ri</option>
<option value="aanekoski">\xc4\xe4nekoski</option>
</select>'''
self.assertHTMLEqual(f.render('municipalities', 'turku'), out)
def test_FIZipCodeField(self):
error_format = ['Enter a zip code in the format XXXXX.']
valid = {
'20540': '20540',
'20101': '20101',
}
invalid = {
'20s40': error_format,
'205401': error_format
}
self.assertFieldOutput(FIZipCodeField, valid, invalid)
def test_FISocialSecurityNumber(self):
error_invalid = ['Enter a valid Finnish social security number.']
valid = {
'010101-0101': '010101-0101',
'010101+0101': '010101+0101',
'010101A0101': '010101A0101',
}
invalid = {
'101010-0102': error_invalid,
'10a010-0101': error_invalid,
'101010-0\xe401': error_invalid,
'101010b0101': error_invalid,
}
self.assertFieldOutput(FISocialSecurityNumber, valid, invalid)
| gpl-3.0 |
soldag/home-assistant | homeassistant/components/file/sensor.py | 11 | 2946 | """Support for sensor value(s) stored in local files."""
import logging
import os
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_FILE_PATH = "file_path"
DEFAULT_NAME = "File"
ICON = "mdi:file"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_FILE_PATH): cv.isfile,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
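# Hedged configuration sketch (editor addition): a minimal configuration.yaml
# entry for this platform; the path, template, and unit below are illustrative
# assumptions.
#
#   sensor:
#     - platform: file
#       name: CPU temperature
#       file_path: /var/log/cpu_temp.txt
#       value_template: "{{ value | round(1) }}"
#       unit_of_measurement: "°C"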
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the file sensor."""
file_path = config.get(CONF_FILE_PATH)
name = config.get(CONF_NAME)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if hass.config.is_allowed_path(file_path):
async_add_entities([FileSensor(name, file_path, unit, value_template)], True)
else:
_LOGGER.error("'%s' is not an allowed directory", file_path)
class FileSensor(Entity):
"""Implementation of a file sensor."""
def __init__(self, name, file_path, unit_of_measurement, value_template):
"""Initialize the file sensor."""
self._name = name
self._file_path = file_path
self._unit_of_measurement = unit_of_measurement
self._val_tpl = value_template
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest entry from a file and updates the state."""
try:
with open(self._file_path, encoding="utf-8") as file_data:
for line in file_data:
data = line
data = data.strip()
except (IndexError, FileNotFoundError, IsADirectoryError, UnboundLocalError):
_LOGGER.warning(
"File or data not present at the moment: %s",
os.path.basename(self._file_path),
)
return
if self._val_tpl is not None:
self._state = self._val_tpl.async_render_with_possible_json_value(
data, None
)
else:
self._state = data
| apache-2.0 |
CollabQ/CollabQ | vendor/django/contrib/gis/geos/tests/test_io.py | 321 | 4159 | import binascii, ctypes, unittest
from django.contrib.gis.geos import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt)
g2 = wkt_r.read(unicode(wkt))
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept basestring objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, buffer('foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref))
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = '000000000140140000000000004037000000000000'
wkb = buffer(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = '010100000000000000000014400000000000003740'
wkb1 = buffer(binascii.a2b_hex(hex1))
hex2 = '000000000140140000000000004037000000000000'
wkb2 = buffer(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = '0101000080000000000000144000000000000037400000000000003140'
wkb3d = buffer(binascii.a2b_hex(hex3d))
hex3d_srid = '01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = buffer(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
# Telling the WKBWriter to inlcude the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSIOTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| apache-2.0 |
takeshineshiro/nova | nova/tests/functional/v3/test_user_data.py | 3 | 1811 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-user-data"
_api_version = 'v2'
def _get_flags(self):
f = super(UserDataJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.user_data.User_data')
return f
def test_user_data_post(self):
user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
user_data = base64.b64encode(user_data_contents)
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'user_data': user_data
}
response = self._do_post('servers', 'userdata-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('userdata-post-resp', subs, response, 202)
| apache-2.0 |
Krossom/python-for-android | python-build/python-libs/gdata/build/lib/atom/service.py | 135 | 28680 | #!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
class AtomService(object):
"""Performs Atom Publishing Protocol CRUD operations.
The AtomService contains methods to perform HTTP CRUD operations.
"""
# Default values for members
port = 80
ssl = False
# Set the current_token to force the AtomService to use this token
# instead of searching for an appropriate token in the token_store.
current_token = None
auto_store_tokens = True
auto_set_current_token = True
def _get_override_token(self):
return self.current_token
def _set_override_token(self, token):
self.current_token = token
override_token = property(_get_override_token, _set_override_token)
@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.')
def __init__(self, server=None, additional_headers=None,
application_name='', http_client=None, token_store=None):
"""Creates a new AtomService client.
Args:
server: string (optional) The start of a URL for the server
to which all operations should be directed. Example:
'www.google.com'
additional_headers: dict (optional) Any additional HTTP headers which
should be included with CRUD operations.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for a specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
self.http_client = http_client or atom.http.ProxiedHttpClient()
self.token_store = token_store or atom.token_store.TokenStore()
self.server = server
self.additional_headers = additional_headers or {}
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
application_name,)
# If debug is True, the HTTPConnection will display debug information
self._set_debug(False)
def _get_debug(self):
return self.http_client.debug
def _set_debug(self, value):
self.http_client.debug = value
debug = property(_get_debug, _set_debug,
doc='If True, HTTP debug information is printed.')
def use_basic_auth(self, username, password, scopes=None):
if username is not None and password is not None:
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
base_64_string = base64.encodestring('%s:%s' % (username, password))
token = BasicAuthToken('Basic %s' % base_64_string.strip(),
scopes=[atom.token_store.SCOPE_ALL])
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
return self.token_store.add_token(token)
return True
return False
def UseBasicAuth(self, username, password, for_proxy=False):
"""Sets an Authenticaiton: Basic HTTP header containing plaintext.
Deprecated, use use_basic_auth instead.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext.
Args:
username: str
password: str
"""
self.use_basic_auth(username, password)
@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.')
def request(self, operation, url, data=None, headers=None,
url_params=None):
if isinstance(url, (str, unicode)):
if url.startswith('http:') and self.ssl:
# Force all requests to be https if self.ssl is True.
url = atom.url.parse_url('https:' + url[5:])
elif not url.startswith('http') and self.ssl:
url = atom.url.parse_url('https://%s%s' % (self.server, url))
elif not url.startswith('http'):
url = atom.url.parse_url('http://%s%s' % (self.server, url))
else:
url = atom.url.parse_url(url)
if url_params:
for name, value in url_params.iteritems():
url.params[name] = value
all_headers = self.additional_headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
content_length = CalculateDataLength(data)
if content_length:
all_headers['Content-Length'] = str(content_length)
# Find an Authorization token for this URL if one is available.
if self.override_token:
auth_token = self.override_token
else:
auth_token = self.token_store.find_token(url)
return auth_token.perform_request(self.http_client, operation, url,
data=data, headers=all_headers)
# CRUD operations
def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
"""Query the APP server with the given URI
The uri is the portion of the URI after the server value
(server example: 'www.google.com').
Example use:
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dict (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse The server's response to the GET request.
"""
return self.request('GET', uri, data=None, headers=extra_headers,
url_params=url_params)
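# Hedged usage sketch (editor addition): issuing the query described in the
# docstring above. The host, credentials, and feed path are illustrative
# assumptions only.
#
#     service = AtomService(server='base.google.com')
#     service.use_basic_auth('user@example.com', 'password')
#     response = service.Get('/base/feeds/snippets?bq=digital+camera')
#     body = response.read()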
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Insert data into an APP server at the given URI.
Args:
data: string, ElementTree._Element, or something with a __str__ method
The XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the POST request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('POST', uri, data=data, headers=extra_headers,
url_params=url_params)
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the PUT request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('PUT', uri, data=data, headers=extra_headers,
url_params=url_params)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the DELETE request.
"""
return self.request('DELETE', uri, data=None, headers=extra_headers,
url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
def __init__(self, auth_header, scopes=None):
"""Creates a token used to add Basic Auth headers to HTTP requests.
Args:
auth_header: str The value for the Authorization header.
scopes: list of str or atom.url.Url specifying the beginnings of URLs
for which this token can be used. For example, if scopes contains
'http://example.com/foo', then this token can be used for a request to
'http://example.com/foo/bar' but it cannot be used for a request to
'http://example.com/baz'
"""
self.auth_header = auth_header
self.scopes = scopes or []
def perform_request(self, http_client, operation, url, data=None,
headers=None):
"""Sets the Authorization header to the basic auth string."""
if headers is None:
headers = {'Authorization':self.auth_header}
else:
headers['Authorization'] = self.auth_header
return http_client.request(operation, url, data=data, headers=headers)
def __str__(self):
return self.auth_header
def valid_for_scope(self, url):
"""Tells the caller if the token authorizes access to the desired URL.
"""
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
for scope in self.scopes:
if scope == atom.token_store.SCOPE_ALL:
return True
if isinstance(scope, (str, unicode)):
scope = atom.url.parse_url(scope)
if scope == url:
return True
# Check the host and the path, but ignore the port and protocol.
elif scope.host == url.host and not scope.path:
return True
elif scope.host == url.host and scope.path and not url.path:
continue
elif scope.host == url.host and url.path.startswith(scope.path):
return True
return False
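# Hedged usage sketch (editor addition): scope matching as implemented above.
# The header value and URLs are illustrative assumptions.
#
#     token = BasicAuthToken('Basic dXNlcjpwYXNz',
#                            scopes=['http://example.com/feeds'])
#     token.valid_for_scope('http://example.com/feeds/posts')  # True
#     token.valid_for_scope('http://example.com/other')        # False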
def PrepareConnection(service, full_uri):
"""Opens a connection to the server based on the full URI.
This method is deprecated, instead use atom.http.HttpClient.request.
Examines the target URI and the proxy settings, which are set as
environment variables, to open a connection with the server. This
connection is used to make an HTTP request.
Args:
service: atom.AtomService or a subclass. It must have a server string which
represents the server host to which the request should be made. It may also
have a dictionary of additional_headers to send in the HTTP request.
full_uri: str Which is the target relative (lacks protocol and host) or
absolute URL to be opened. Example:
'https://www.google.com/accounts/ClientLogin' or
'base/feeds/snippets' where the server is set to www.google.com.
Returns:
A tuple containing the httplib.HTTPConnection and the full_uri for the
request.
"""
deprecation('calling deprecated function PrepareConnection')
(server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
if ssl:
# destination is https
proxy = os.environ.get('https_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
user_auth.strip()))
else:
proxy_authorization = ''
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
user_agent = 'User-Agent: %s\r\n' % (
service.additional_headers['User-Agent'])
proxy_pieces = (proxy_connect + proxy_authorization + user_agent
+ '\r\n')
#now connect, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((p_server,p_port))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status=response.split()[1]
if p_status!=str(200):
        raise Exception('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
ssl = socket.ssl(p_sock, None, None)
fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(server)
connection.sock=fake_sock
full_uri = partial_uri
else:
connection = httplib.HTTPSConnection(server, port)
full_uri = partial_uri
else:
# destination is http
proxy = os.environ.get('http_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
UseBasicAuth(service, proxy_username, proxy_password, True)
connection = httplib.HTTPConnection(p_server, p_port)
if not full_uri.startswith("http://"):
if full_uri.startswith("/"):
full_uri = "http://%s%s" % (service.server, full_uri)
else:
full_uri = "http://%s/%s" % (service.server, full_uri)
else:
connection = httplib.HTTPConnection(server, port)
full_uri = partial_uri
return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
"""Sets an Authenticaiton: Basic HTTP header containing plaintext.
Deprecated, use AtomService.use_basic_auth insread.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext. The auth header is added to the
additional_headers dictionary in the service object.
Args:
service: atom.AtomService or a subclass which has an
additional_headers dict as a member.
username: str
password: str
"""
deprecation('calling deprecated function UseBasicAuth')
base_64_string = base64.encodestring('%s:%s' % (username, password))
base_64_string = base_64_string.strip()
if for_proxy:
header_name = 'Proxy-Authorization'
else:
header_name = 'Authorization'
service.additional_headers[header_name] = 'Basic %s' % (base_64_string,)
def ProcessUrl(service, url, for_proxy=False):
"""Processes a passed URL. If the URL does not begin with https?, then
  the default value for server is used.
This method is deprecated, use atom.url.parse_url instead.
"""
if not isinstance(url, atom.url.Url):
url = atom.url.parse_url(url)
server = url.host
ssl = False
port = 80
if not server:
if hasattr(service, 'server'):
server = service.server
else:
server = service
if not url.protocol and hasattr(service, 'ssl'):
ssl = service.ssl
if hasattr(service, 'port'):
port = service.port
else:
if url.protocol == 'https':
ssl = True
elif url.protocol == 'http':
ssl = False
if url.port:
port = int(url.port)
elif port == 80 and ssl:
port = 443
return (server, port, ssl, url.get_request_uri())
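# Rough sketch of what the deprecated ProcessUrl helper returns; the URL is
# illustrative and the exact request-URI string depends on atom.url.parse_url:
#
#   server, port, ssl, uri = ProcessUrl(None, 'https://www.example.com/feeds?q=x')
#   # server == 'www.example.com', ssl == True, port == 443,
#   # uri is the request URI portion, e.g. '/feeds?q=x'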
def DictionaryToParamList(url_parameters, escape_params=True):
"""Convert a dictionary of URL arguments into a URL parameter string.
  This function is deprecated, use atom.url.Url instead.
Args:
    url_parameters: The dictionary of key-value pairs which will be converted
into URL parameters. For example,
{'dry-run': 'true', 'foo': 'bar'}
will become ['dry-run=true', 'foo=bar'].
Returns:
A list which contains a string for each key-value pair. The strings are
ready to be incorporated into a URL by using '&'.join([] + parameter_list)
"""
# Choose which function to use when modifying the query and parameters.
# Use quote_plus when escape_params is true.
transform_op = [str, urllib.quote_plus][bool(escape_params)]
# Create a list of tuples containing the escaped version of the
# parameter-value pairs.
parameter_tuples = [(transform_op(param), transform_op(value))
for param, value in (url_parameters or {}).items()]
# Turn parameter-value tuples into a list of strings in the form
# 'PARAMETER=VALUE'.
return ['='.join(x) for x in parameter_tuples]
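# Quick sketch of DictionaryToParamList with illustrative values:
#
#   DictionaryToParamList({'dry-run': 'true', 'foo': 'bar'})
#   # -> ['dry-run=true', 'foo=bar'] (ordering follows dict iteration order)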
def BuildUri(uri, url_params=None, escape_params=True):
"""Converts a uri string and a collection of parameters into a URI.
  This function is deprecated, use atom.url.Url instead.
Args:
uri: string
url_params: dict (optional)
escape_params: boolean (optional)
    uri: string The start of the desired URI. This string can already contain
URL parameters. Examples: '/base/feeds/snippets',
'/base/feeds/snippets?bq=digital+camera'
url_parameters: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
string The URI consisting of the escaped URL parameters appended to the
initial uri string.
"""
# Prepare URL parameters for inclusion into the GET request.
parameter_list = DictionaryToParamList(url_params, escape_params)
# Append the URL parameters to the URL.
if parameter_list:
if uri.find('?') != -1:
# If there are already URL parameters in the uri string, add the
# parameters after a new & character.
full_uri = '&'.join([uri] + parameter_list)
else:
# The uri string did not have any URL parameters (no ? character)
# so put a ? between the uri and URL parameters.
full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list)))
else:
full_uri = uri
return full_uri
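# Sketch of how BuildUri joins a uri and extra URL parameters (values are
# illustrative):
#
#   BuildUri('/base/feeds/snippets', {'max-results': '250'})
#   # -> '/base/feeds/snippets?max-results=250'
#   BuildUri('/base/feeds/snippets?bq=digital+camera', {'max-results': '250'})
#   # -> '/base/feeds/snippets?bq=digital+camera&max-results=250'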
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
This method is deprecated, use atom.http.HttpClient.request instead.
  Usage example, perform an HTTP GET on http://www.google.com/:
import atom.service
client = atom.service.AtomService()
http_response = client.Get('http://www.google.com/')
or you could set the client.server to 'www.google.com' and use the
following:
client.server = 'www.google.com'
http_response = client.Get('/')
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
deprecation('call to deprecated function HttpRequest')
full_uri = BuildUri(uri, url_params, escape_params)
(connection, full_uri) = PrepareConnection(service, full_uri)
if extra_headers is None:
extra_headers = {}
# Turn on debug mode if the debug member is set.
if service.debug:
connection.debuglevel = 1
connection.putrequest(operation, full_uri)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if (data and not service.additional_headers.has_key('Content-Length') and
not extra_headers.has_key('Content-Length')):
content_length = CalculateDataLength(data)
if content_length:
extra_headers['Content-Length'] = str(content_length)
if content_type:
extra_headers['Content-Type'] = content_type
# Send the HTTP headers.
if isinstance(service.additional_headers, dict):
for header in service.additional_headers:
connection.putheader(header, service.additional_headers[header])
if isinstance(extra_headers, dict):
for header in extra_headers:
connection.putheader(header, extra_headers[header])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
__SendDataPart(data_part, connection)
else:
__SendDataPart(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def __SendDataPart(data, connection):
"""This method is deprecated, use atom.http._send_data_part"""
  deprecation('call to deprecated function __SendDataPart')
if isinstance(data, str):
#TODO add handling for unicode.
connection.send(data)
return
elif ElementTree.iselement(data):
connection.send(ElementTree.tostring(data))
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
def CalculateDataLength(data):
"""Attempts to determine the length of the data to send.
This method will respond with a length only if the data is a string or
  an ElementTree element.
Args:
    data: object If this is not a string or ElementTree element this function
will return None.
"""
if isinstance(data, str):
return len(data)
elif isinstance(data, list):
return None
elif ElementTree.iselement(data):
return len(ElementTree.tostring(data))
elif hasattr(data, 'read'):
# If this is a file-like object, don't try to guess the length.
return None
else:
return len(str(data))
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=2)
| apache-2.0 |
PandaWei/tp-libvirt | libguestfs/tests/guestfs_part_operations.py | 8 | 15077 | import re
import os
import logging
from autotest.client.shared import error, utils
from virttest import virt_vm, data_dir, remote, aexpect
from virttest import utils_test
def test_formatted_part(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Do some necessary check
3) Format additional disk
4) Try to write a file to mounted device
5) Login to check written file
"""
add_device = params.get("gf_additional_device", "/dev/vdb")
device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
ignore_status=True).stdout.strip()
if utils_test.libguestfs.primary_disk_virtio(vm):
device_in_vm = add_device
else:
device_in_vm = "/dev/vda"
vt = utils_test.libguestfs.VirtTools(vm, params)
# Create a new vm with additional disk
vt.update_vm_disk()
# Get root filesystem before test
params['libvirt_domain'] = vt.newvm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# List devices
list_dev_result = gf.list_devices()
logging.debug(list_dev_result)
if list_dev_result.exit_status:
gf.close_session()
raise error.TestFail("List devices failed")
else:
if not re.search(device_in_gf, list_dev_result.stdout):
gf.close_session()
raise error.TestFail("Did not find additional device.")
logging.info("List devices successfully.")
creates, createo = gf.create_msdos_part(device_in_gf)
if creates is False:
gf.close_session()
raise error.TestFail(createo)
part_name_in_vm = "%s%s" % (device_in_vm, createo)
part_name_in_gf = "%s%s" % (device_in_gf, createo)
logging.info("Create partition successfully.")
mkfs_result = gf.mkfs("ext3", part_name_in_gf)
logging.debug(mkfs_result)
if mkfs_result.exit_status:
gf.close_session()
raise error.TestFail("Format %s Failed" % part_name_in_gf)
logging.info("Format %s successfully.", part_name_in_gf)
mountpoint = params.get("gf_mountpoint", "/mnt")
mount_result = gf.mount(part_name_in_gf, mountpoint)
logging.debug(mount_result)
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Mount %s Failed" % part_name_in_gf)
logging.info("Mount %s successfully.", part_name_in_gf)
# List mounts
list_df_result = gf.df()
logging.debug(list_df_result)
if list_df_result.exit_status:
gf.close_session()
raise error.TestFail("Df failed")
else:
if not re.search(part_name_in_gf, list_df_result.stdout):
gf.close_session()
raise error.TestFail("Did not find mounted device.")
logging.info("Df successfully.")
# Write file
path = "%s/gf_part_test" % mountpoint
content = "This is file for test_formatted_part."
write_result = gf.write(path, content)
gf.close_session()
logging.debug(write_result)
if write_result.exit_status:
raise error.TestFail("Create file failed.")
logging.info("Create %s successfully.", path)
attached_vm = vt.newvm
try:
attached_vm.start()
session = attached_vm.wait_for_login()
except (virt_vm.VMError, remote.LoginError), detail:
attached_vm.destroy()
raise error.TestFail(str(detail))
try:
session.cmd_status("mount %s %s" % (part_name_in_vm, mountpoint),
timeout=10)
session.cmd_status("cat %s" % path, timeout=5)
attached_vm.destroy()
attached_vm.wait_for_shutdown()
except (virt_vm.VMError, remote.LoginError, aexpect.ShellError), detail:
if attached_vm.is_alive():
attached_vm.destroy()
if not re.search(content, str(detail)):
raise error.TestFail(str(detail))
logging.info("Check file on guest successfully.")
def test_unformatted_part(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Do some necessary check
    3) Initialize but do not format the additional disk
4) Try to mount device
"""
add_device = params.get("gf_additional_device", "/dev/vdb")
device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
ignore_status=True).stdout.strip()
vt = utils_test.libguestfs.VirtTools(vm, params)
# Create a new vm with additional disk
vt.update_vm_disk()
# Get root filesystem before test
params['libvirt_domain'] = vt.newvm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# List devices
list_dev_result = gf.list_devices()
logging.debug(list_dev_result)
if list_dev_result.exit_status:
gf.close_session()
raise error.TestFail("List devices failed")
else:
if not re.search(device_in_gf, list_dev_result.stdout):
gf.close_session()
raise error.TestFail("Did not find additional device.")
logging.info("List devices successfully.")
creates, createo = gf.create_msdos_part(device_in_gf)
if creates is False:
gf.close_session()
raise error.TestFail(createo)
part_name_in_gf = "%s%s" % (device_in_gf, createo)
logging.info("Create partition successfully.")
mountpoint = params.get("gf_mountpoint", "/mnt")
mount_result = gf.mount(part_name_in_gf, mountpoint)
gf.close_session()
logging.debug(mount_result)
if mount_result.exit_status == 0:
raise error.TestFail("Mount %s successfully." % part_name_in_gf)
else:
if not re.search("[filesystem|fs] type", mount_result.stdout):
raise error.TestFail("Unknown error.")
def test_formatted_disk(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Do some necessary check
3) Format additional disk with part-disk
4) Try to write a file to mounted device
    5) Login to check written file
"""
add_device = params.get("gf_additional_device", "/dev/vdb")
device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
ignore_status=True).stdout.strip()
if utils_test.libguestfs.primary_disk_virtio(vm):
device_in_vm = add_device
else:
device_in_vm = "/dev/vda"
vt = utils_test.libguestfs.VirtTools(vm, params)
# Create a new vm with additional disk
vt.update_vm_disk()
# Get root filesystem before test
params['libvirt_domain'] = vt.newvm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# List devices
list_dev_result = gf.list_devices()
logging.debug(list_dev_result)
if list_dev_result.exit_status:
gf.close_session()
raise error.TestFail("List devices failed")
else:
if not re.search(device_in_gf, list_dev_result.stdout):
gf.close_session()
raise error.TestFail("Did not find additional device.")
logging.info("List devices successfully.")
creates, createo = gf.create_whole_disk_msdos_part(device_in_gf)
if creates is False:
gf.close_session()
raise error.TestFail(createo)
part_name_in_vm = "%s%s" % (device_in_vm, createo)
part_name_in_gf = "%s%s" % (device_in_gf, createo)
logging.info("Create partition successfully.")
mkfs_result = gf.mkfs("ext3", part_name_in_gf)
logging.debug(mkfs_result)
if mkfs_result.exit_status:
gf.close_session()
raise error.TestFail("Format %s Failed" % part_name_in_gf)
logging.info("Format %s successfully.", part_name_in_gf)
mountpoint = params.get("gf_mountpoint", "/mnt")
mount_result = gf.mount(part_name_in_gf, mountpoint)
logging.debug(mount_result)
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Mount %s Failed" % part_name_in_gf)
logging.info("Mount %s successfully.", part_name_in_gf)
# List mounts
list_df_result = gf.df()
logging.debug(list_df_result)
if list_df_result.exit_status:
gf.close_session()
raise error.TestFail("Df failed")
else:
if not re.search(part_name_in_gf, list_df_result.stdout):
gf.close_session()
raise error.TestFail("Did not find mounted device.")
logging.info("Df successfully.")
# Write file
path = "%s/gf_part_test" % mountpoint
content = "This is file for test_formatted_disk."
write_result = gf.write(path, content)
gf.close_session()
logging.debug(write_result)
if write_result.exit_status:
raise error.TestFail("Create file failed.")
logging.info("Create %s successfully.", path)
attached_vm = vt.newvm
try:
attached_vm.start()
session = attached_vm.wait_for_login()
except (virt_vm.VMError, remote.LoginError), detail:
attached_vm.destroy()
raise error.TestFail(str(detail))
try:
session.cmd_status("mount %s %s" % (part_name_in_vm, mountpoint),
timeout=10)
session.cmd_status("cat %s" % path, timeout=5)
attached_vm.destroy()
attached_vm.wait_for_shutdown()
except (virt_vm.VMError, remote.LoginError, aexpect.ShellError), detail:
if attached_vm.is_alive():
attached_vm.destroy()
if not re.search(content, str(detail)):
raise error.TestFail(str(detail))
logging.info("Check file on guest successfully.")
def test_partition_info(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Do some necessary check
3) Get part info with part-get-bootable and part-get-parttype
"""
vt = utils_test.libguestfs.VirtTools(vm, params)
# Create a new vm with additional disk
vt.update_vm_disk()
params['libvirt_domain'] = vt.newvm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# List partitions
list_part_result = gf.list_partitions()
if list_part_result.exit_status:
gf.close_session()
raise error.TestFail("List partitions failed:%s" % list_part_result)
logging.info("List partitions successfully.")
getbas, getbao = gf.get_bootable_part()
logging.debug("Bootable info:%s", getbao)
if getbas is False:
gf.close_session()
raise error.TestFail("Get bootable failed.")
getmbrids, getmbrido = gf.get_mbr_id()
logging.debug("Get mbr id:%s", getmbrido)
if getmbrids is False:
gf.close_session()
raise error.TestFail("Get mbr id failed.")
getpts, getpto = gf.get_part_type()
logging.debug("Get parttype:%s", getpto)
gf.close_session()
if getpts is False:
raise error.TestFail("Get parttype failed.")
def test_fscked_partition(vm, params):
"""
1) Fall into guestfish session w/ inspector
2) Do some necessary check
3) Format additional disk with part-disk
4) Try to write a file to mounted device and get its md5
5) Do fsck to new added partition
"""
add_device = params.get("gf_additional_device", "/dev/vdb")
device_in_gf = utils.run("echo %s | sed -e 's/vd/sd/g'" % add_device,
ignore_status=True).stdout.strip()
vt = utils_test.libguestfs.VirtTools(vm, params)
# Create a new vm with additional disk
vt.update_vm_disk()
params['libvirt_domain'] = vt.newvm.name
params['gf_inspector'] = True
gf = utils_test.libguestfs.GuestfishTools(params)
# List devices
list_dev_result = gf.list_devices()
logging.debug(list_dev_result)
if list_dev_result.exit_status:
gf.close_session()
raise error.TestFail("List devices failed")
else:
if not re.search(device_in_gf, list_dev_result.stdout):
gf.close_session()
raise error.TestFail("Did not find additional device.")
logging.info("List devices successfully.")
creates, createo = gf.create_whole_disk_msdos_part(device_in_gf)
if creates is False:
gf.close_session()
raise error.TestFail(createo)
part_name_in_gf = "%s%s" % (device_in_gf, createo)
logging.info("Create partition successfully.")
mkfs_result = gf.mkfs("ext3", part_name_in_gf)
logging.debug(mkfs_result)
if mkfs_result.exit_status:
gf.close_session()
raise error.TestFail("Format %s Failed" % part_name_in_gf)
logging.info("Format %s successfully.", part_name_in_gf)
mountpoint = params.get("gf_mountpoint", "/mnt")
mount_result = gf.mount(part_name_in_gf, mountpoint)
logging.debug(mount_result)
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Mount %s Failed" % part_name_in_gf)
logging.info("Mount %s successfully.", part_name_in_gf)
# List mounts
list_df_result = gf.df()
logging.debug(list_df_result)
if list_df_result.exit_status:
gf.close_session()
raise error.TestFail("Df failed")
else:
if not re.search(part_name_in_gf, list_df_result.stdout):
gf.close_session()
raise error.TestFail("Did not find mounted device.")
logging.info("Df successfully.")
# Write file
path = "%s/gf_part_test" % mountpoint
content = "This is file for test_fscked_partition."
write_result = gf.write(path, content)
logging.debug(write_result)
if write_result.exit_status:
gf.close_session()
raise error.TestFail("Create file failed.")
logging.info("Create %s successfully.", path)
md5s, md5o = gf.get_md5(path)
if md5s is False:
gf.close_session()
raise error.TestFail(md5o)
md5_old = md5o.strip()
logging.debug("%s's md5 in oldvm is:%s", path, md5_old)
# Do fsck
fsck_result = gf.fsck("ext3", part_name_in_gf)
logging.debug(fsck_result)
if fsck_result.exit_status:
raise error.TestFail("Do fsck to %s failed." % part_name_in_gf)
logging.info("Do fsck to %s successfully.", part_name_in_gf)
md5s, md5o = gf.get_md5(path)
if md5s is False:
gf.close_session()
raise error.TestFail(md5o)
gf.close_session()
md5_new = md5o.strip()
logging.debug("%s's md5 in newvm is:%s", path, md5_new)
if md5_old != md5_new:
raise error.TestFail("Md5 of new vm is not match with old one.")
def run(test, params, env):
"""
Test guestfs with partition commands.
"""
vm_name = params.get("main_vm")
new_vm_name = params.get("gf_updated_new_vm")
vm = env.get_vm(vm_name)
# To make sure old vm is down
if vm.is_alive():
vm.destroy()
operation = params.get("gf_part_operation")
testcase = globals()["test_%s" % operation]
try:
# Create a new vm for editing and easier cleanup :)
utils_test.libguestfs.define_new_vm(vm_name, new_vm_name)
testcase(vm, params)
finally:
disk_path = os.path.join(data_dir.get_tmp_dir(),
params.get("gf_updated_target_dev"))
utils_test.libguestfs.cleanup_vm(new_vm_name, disk_path)
| gpl-2.0 |
Aasmi/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
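# Small illustrative sketch of the private helper above (toy values only):
#
#   >>> import numpy as np
#   >>> y_true = np.array([0, 0, 1, 1])
#   >>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   >>> fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
#   >>> thresholds          # decreasing distinct scores
#   array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
#   >>> tps                 # true positives at each threshold
#   array([ 1.,  1.,  2.,  2.])
#   >>> fps                 # false positives at each threshold
#   array([ 0.,  1.,  1.,  2.])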
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
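# Small illustrative sketch for coverage_error (toy values only):
#
#   >>> import numpy as np
#   >>> y_true = np.array([[1, 0, 0], [0, 1, 1]])
#   >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   >>> coverage_error(y_true, y_score)
#   2.5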
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
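# Small illustrative sketch for label_ranking_loss (toy values only):
#
#   >>> import numpy as np
#   >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
#   >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   >>> label_ranking_loss(y_true, y_score)  # doctest: +ELLIPSIS
#   0.75...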
| bsd-3-clause |
fbradyirl/home-assistant | homeassistant/components/supla/__init__.py | 1 | 4458 | """Support for Supla devices."""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ["pysupla==0.0.3"]
_LOGGER = logging.getLogger(__name__)
DOMAIN = "supla"
CONF_SERVER = "server"
CONF_SERVERS = "servers"
SUPLA_FUNCTION_HA_CMP_MAP = {"CONTROLLINGTHEROLLERSHUTTER": "cover"}
SUPLA_CHANNELS = "supla_channels"
SUPLA_SERVERS = "supla_servers"
SERVER_CONFIG = vol.Schema(
{vol.Required(CONF_SERVER): cv.string, vol.Required(CONF_ACCESS_TOKEN): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_SERVERS): vol.All(cv.ensure_list, [SERVER_CONFIG])}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, base_config):
"""Set up the Supla component."""
from pysupla import SuplaAPI
server_confs = base_config[DOMAIN][CONF_SERVERS]
hass.data[SUPLA_SERVERS] = {}
hass.data[SUPLA_CHANNELS] = {}
for server_conf in server_confs:
server_address = server_conf[CONF_SERVER]
server = SuplaAPI(server_address, server_conf[CONF_ACCESS_TOKEN])
# Test connection
try:
srv_info = server.get_server_info()
if srv_info.get("authenticated"):
hass.data[SUPLA_SERVERS][server_conf[CONF_SERVER]] = server
else:
_LOGGER.error(
"Server: %s not configured. API call returned: %s",
server_address,
srv_info,
)
return False
except IOError:
_LOGGER.exception(
"Server: %s not configured. Error on Supla API access: ", server_address
)
return False
discover_devices(hass, base_config)
return True
def discover_devices(hass, hass_config):
"""
Run periodically to discover new devices.
Currently it's only run at startup.
"""
component_configs = {}
for server_name, server in hass.data[SUPLA_SERVERS].items():
for channel in server.get_channels(include=["iodevice"]):
channel_function = channel["function"]["name"]
component_name = SUPLA_FUNCTION_HA_CMP_MAP.get(channel_function)
if component_name is None:
_LOGGER.warning(
"Unsupported function: %s, channel id: %s",
channel_function,
channel["id"],
)
continue
channel["server_name"] = server_name
component_configs.setdefault(component_name, []).append(channel)
# Load discovered devices
for component_name, channel in component_configs.items():
load_platform(hass, component_name, "supla", channel, hass_config)
class SuplaChannel(Entity):
"""Base class of a Supla Channel (an equivalent of HA's Entity)."""
def __init__(self, channel_data):
"""Channel data -- raw channel information from PySupla."""
self.server_name = channel_data["server_name"]
self.channel_data = channel_data
@property
def server(self):
"""Return PySupla's server component associated with entity."""
return self.hass.data[SUPLA_SERVERS][self.server_name]
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "supla-{}-{}".format(
self.channel_data["iodevice"]["gUIDString"].lower(),
self.channel_data["channelNumber"],
)
@property
def name(self) -> Optional[str]:
"""Return the name of the device."""
return self.channel_data["caption"]
def action(self, action, **add_pars):
"""
Run server action.
Actions are currently hardcoded in components.
        Supla's API enables autodiscovery.
"""
_LOGGER.debug(
"Executing action %s on channel %d, params: %s",
action,
self.channel_data["id"],
add_pars,
)
self.server.execute_action(self.channel_data["id"], action, **add_pars)
def update(self):
"""Call to update state."""
self.channel_data = self.server.get_channel(
self.channel_data["id"], include=["connected", "state"]
)
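# Rough sketch of how a platform entity could build on SuplaChannel; the class
# name and the action strings below are illustrative, not taken from this module:
#
#   class SuplaCoverSketch(SuplaChannel):
#       def open_cover(self, **kwargs):
#           self.action("REVEAL")
#       def close_cover(self, **kwargs):
#           self.action("SHUT")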
| apache-2.0 |
kylelwm/ponus | venv/Lib/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
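# Usage sketch (assumes the codec is registered through the standard codec
# search function in the encodings package, like the other bundled codecs):
#
#   >>> u'\ud55c\uae00'.encode('euc_kr').decode('euc_kr') == u'\ud55c\uae00'
#   True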
| mit |
sliz1/servo | tests/wpt/css-tests/css21_dev/xhtml1print/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
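# Expected layout of gsubtest-features.txt, inferred from the parsing above:
# one tab-separated record per line, first field is the OpenType feature tag,
# and lines starting with "#" are comments. The tags shown here are examples only:
#
#   # feature tags
#   liga<TAB>...
#   smcp<TAB>...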
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
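# Illustrative shape of the generated javascript data (a sketch, not actual
# output: the feature tags and the base codepoint value are assumptions; the
# code above assigns each tag baseCodepoint + 4 * its index):
#   var gFeatures = {
#     "abvf": 0xe000, "abvm": 0xe004, "abvs": 0xe008
#   };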
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
SymbiFlow/symbiflow-arch-defs | utils/lib/collections_extra.py | 1 | 8235 | #!/usr/bin/env python3
import enum
import io
import pprint
import sys
from types import MappingProxyType
def frozendict(*args, **kwargs):
"""Version of a dictionary which can't be changed."""
return MappingProxyType(dict(*args, **kwargs))
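# Illustrative behaviour (a sketch mirroring the doctests below, not part of
# the original module):
#   d = frozendict(a=1)
#   d['a'] -> 1
#   d['b'] = 2 -> TypeError: 'mappingproxy' object does not support item assignment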
class MostlyReadOnly:
"""Object which is **mostly** read only. Can set if not already set.
>>> class MyRO(MostlyReadOnly):
... __slots__ = ["_str", "_list", "_set", "_dict"]
>>> a = MyRO()
>>> a
MyRO(str=None, list=None, set=None, dict=None)
>>> a._str = 't'
>>> a.str
't'
>>> a._list = [1,2,3]
>>> a.list
(1, 2, 3)
>>> a._set = {1, 2, 3}
>>> a.set
frozenset({1, 2, 3})
>>> a._dict = {'a': 1, 'b': 2, 'c': 3}
>>> b = a.dict
>>> b['d'] = 4
Traceback (most recent call last):
...
b['d'] = 4
TypeError: 'mappingproxy' object does not support item assignment
>>> sorted(b.items())
[('a', 1), ('b', 2), ('c', 3)]
>>> a._dict['d'] = 4
>>> sorted(a._dict.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> sorted(b.items())
[('a', 1), ('b', 2), ('c', 3)]
>>> a
MyRO(str='t', list=[1, 2, 3], set={1, 2, 3}, dict={'a': 1, 'b': 2, 'c': 3, 'd': 4})
>>> a.missing
Traceback (most recent call last):
...
AttributeError: 'MyRO' object has no attribute 'missing'
>>> a.missing = 1
Traceback (most recent call last):
...
AttributeError: missing not found on <class 'lib.collections_extra.MyRO'>
>>> a.missing
Traceback (most recent call last):
...
AttributeError: 'MyRO' object has no attribute 'missing'
"""
def __setattr__(self, key, new_value=None):
if key.startswith("_"):
current_value = getattr(self, key[1:])
if new_value == current_value:
return
elif current_value is not None:
raise AttributeError(
"{} is already set to {}, can't be changed".format(
key, current_value
)
)
return super().__setattr__(key, new_value)
if "_" + key not in self.__class__.__slots__:
raise AttributeError(
"{} not found on {}".format(key, self.__class__)
)
self.__setattr__("_" + key, new_value)
def __getattr__(self, key):
if "_" + key not in self.__class__.__slots__:
super().__getattribute__(key)
value = getattr(self, "_" + key, None)
if isinstance(value,
(tuple, int, bytes, str, type(None), MostlyReadOnly)):
return value
elif isinstance(value, list):
return tuple(value)
elif isinstance(value, set):
return frozenset(value)
elif isinstance(value, dict):
return frozendict(value)
elif isinstance(value, enum.Enum):
return value
else:
raise AttributeError(
"Unable to return {}, don't now how to make type {} (from {!r}) read only."
.format(key, type(value), value)
)
def __repr__(self):
attribs = []
for attr in self.__slots__:
value = getattr(self, attr, None)
if isinstance(value, MostlyReadOnly):
rvalue = "{}()".format(value.__class__.__name__)
elif isinstance(value, (dict, set)):
s = io.StringIO()
pprint.pprint(value, stream=s, width=sys.maxsize)
rvalue = s.getvalue().strip()
else:
rvalue = repr(value)
if attr.startswith("_"):
attr = attr[1:]
attribs.append("{}={!s}".format(attr, rvalue))
return "{}({})".format(self.__class__.__name__, ", ".join(attribs))
class OrderedEnum(enum.Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.name >= other.name
if hasattr(other.__class__, "name"):
return self.name >= other.name
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.name > other.name
if hasattr(other.__class__, "name"):
return self.name > other.name
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.name <= other.name
if hasattr(other.__class__, "name"):
return self.name <= other.name
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.name < other.name
if hasattr(other.__class__, "name"):
return self.name < other.name
return NotImplemented
class CompassDir(OrderedEnum):
"""
>>> print(repr(CompassDir.NN))
<CompassDir.NN: 'North'>
>>> print(str(CompassDir.NN))
( 0, -1, NN)
>>> for d in CompassDir:
... print(OrderedEnum.__str__(d))
CompassDir.NW
CompassDir.NN
CompassDir.NE
CompassDir.EE
CompassDir.SE
CompassDir.SS
CompassDir.SW
CompassDir.WW
>>> for y in (-1, 0, 1):
... for x in (-1, 0, 1):
... print(
... "(%2i %2i)" % (x, y),
... str(CompassDir.from_coords(x, y)),
... str(CompassDir.from_coords((x, y))),
... )
(-1 -1) (-1, -1, NW) (-1, -1, NW)
( 0 -1) ( 0, -1, NN) ( 0, -1, NN)
( 1 -1) ( 1, -1, NE) ( 1, -1, NE)
(-1 0) (-1, 0, WW) (-1, 0, WW)
( 0 0) None None
( 1 0) ( 1, 0, EE) ( 1, 0, EE)
(-1 1) (-1, 1, SW) (-1, 1, SW)
( 0 1) ( 0, 1, SS) ( 0, 1, SS)
( 1 1) ( 1, 1, SE) ( 1, 1, SE)
>>> print(str(CompassDir.NN.flip()))
( 0, 1, SS)
>>> print(str(CompassDir.SE.flip()))
(-1, -1, NW)
"""
NW = 'North West'
NN = 'North'
NE = 'North East'
EE = 'East'
SE = 'South East'
SS = 'South'
SW = 'South West'
WW = 'West'
# Single letter aliases
N = NN
E = EE
S = SS
W = WW
@property
def distance(self):
return sum(a * a for a in self.coords)
def __init__(self, *args, **kw):
self.__cords = None
pass
@property
def coords(self):
if not self.__cords:
self.__cords = self.convert_to_coords[self]
return self.__cords
@property
def x(self):
return self.coords[0]
@property
def y(self):
return self.coords[-1]
def __iter__(self):
return iter(self.coords)
def __getitem__(self, k):
return self.coords[k]
@classmethod
def from_coords(cls, x, y=None):
if y is None:
return cls.from_coords(*x)
return cls.convert_from_coords[(x, y)]
def flip(self):
return self.from_coords(self.flip_coords[self.coords])
def __add__(self, o):
return o.__class__(o[0] + self.x, o[1] + self.y)
def __radd__(self, o):
return o.__class__(o[0] + self.x, o[1] + self.y)
def __str__(self):
return "(%2i, %2i, %s)" % (self.x, self.y, self.name)
CompassDir.convert_to_coords = {}
CompassDir.convert_from_coords = {}
CompassDir.flip_coords = {}
CompassDir.straight = []
CompassDir.angled = []
for d in list(CompassDir) + [None]:
if d is None:
x, y = 0, 0
else:
if d.name[0] == 'N':
y = -1
elif d.name[0] == 'S':
y = 1
else:
assert d.name[0] in ('E', 'W')
y = 0
if d.name[1] == 'E':
x = 1
elif d.name[1] == 'W':
x = -1
else:
assert d.name[1] in ('N', 'S')
x = 0
CompassDir.convert_to_coords[d] = (x, y)
CompassDir.convert_from_coords[(x, y)] = d
CompassDir.flip_coords[(x, y)] = (-1 * x, -1 * y)
length = x * x + y * y
if length == 1:
CompassDir.straight.append(d)
elif length == 2:
CompassDir.angled.append(d)
if __name__ == "__main__":
import doctest
failure_count, test_count = doctest.testmod()
assert test_count > 0
assert failure_count == 0, "Doctests failed!"
| isc |
perdona/django-pipeline | tests/tests/test_glob.py | 6 | 3728 | from __future__ import unicode_literals
import os
import shutil
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.test import TestCase
from pipeline import glob
def local_path(path):
return os.path.join(os.path.dirname(__file__), path)
class GlobTest(TestCase):
def normpath(self, *parts):
return os.path.normpath(os.path.join(*parts))
def mktemp(self, *parts):
filename = self.normpath(*parts)
base, file = os.path.split(filename)
base = os.path.join(self.storage.location, base)
if not os.path.exists(base):
os.makedirs(base)
self.storage.save(filename, ContentFile(""))
def assertSequenceEqual(self, l1, l2):
self.assertEqual(set(l1), set(l2))
def setUp(self):
self.storage = FileSystemStorage(local_path('glob_dir'))
self.old_storage = glob.staticfiles_storage
glob.staticfiles_storage = self.storage
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
return glob.glob(pattern)
def tearDown(self):
shutil.rmtree(self.storage.location)
glob.staticfiles_storage = self.old_storage
def test_glob_literal(self):
self.assertSequenceEqual(self.glob('a'),
[self.normpath('a')])
self.assertSequenceEqual(self.glob('a', 'D'),
[self.normpath('a', 'D')])
self.assertSequenceEqual(self.glob('aab'),
[self.normpath('aab')])
self.assertSequenceEqual(self.glob('zymurgy'), [])
def test_glob_one_directory(self):
self.assertSequenceEqual(self.glob('a*'),
map(self.normpath, ['a', 'aab', 'aaa']))
self.assertSequenceEqual(self.glob('*a'),
map(self.normpath, ['a', 'aaa']))
self.assertSequenceEqual(self.glob('aa?'),
map(self.normpath, ['aaa', 'aab']))
self.assertSequenceEqual(self.glob('aa[ab]'),
map(self.normpath, ['aaa', 'aab']))
self.assertSequenceEqual(self.glob('*q'), [])
def test_glob_nested_directory(self):
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
self.assertSequenceEqual(self.glob('a', 'bcd', 'E*'),
[self.normpath('a', 'bcd', 'EF')])
else:
# case insensitive filesystem
self.assertSequenceEqual(self.glob('a', 'bcd', 'E*'), [
self.normpath('a', 'bcd', 'EF'),
self.normpath('a', 'bcd', 'efg')
])
self.assertSequenceEqual(self.glob('a', 'bcd', '*g'),
[self.normpath('a', 'bcd', 'efg')])
def test_glob_directory_names(self):
self.assertSequenceEqual(self.glob('*', 'D'),
[self.normpath('a', 'D')])
self.assertSequenceEqual(self.glob('*', '*a'), [])
self.assertSequenceEqual(self.glob('a', '*', '*', '*a'),
[self.normpath('a', 'bcd', 'efg', 'ha')])
self.assertSequenceEqual(self.glob('?a?', '*F'),
map(self.normpath, [os.path.join('aaa', 'zzzF'),
os.path.join('aab', 'F')]))
def test_glob_directory_with_trailing_slash(self):
# We are verifying that when there is wildcard pattern which
# ends with os.sep doesn't blow up.
paths = glob.glob('*' + os.sep)
self.assertEqual(len(paths), 4)
self.assertTrue(all([os.sep in path for path in paths]))
| mit |
Slattsveen/eduROV_v2 | eduROV_v01.py | 1 | 6154 | import sys, pygame, pygame.camera, os, time, serial
from sense_hat import SenseHat
# to add the piCam to the list of available cameras, run command:
# sudo modprobe bcm2835-v4l2
os.system("sudo modprobe bcm2835-v4l2")
##ititiating pygame module, camera, serial port, senseHat
pygame.init()
pygame.camera.init()
senseHat = SenseHat()
ser = serial.Serial('/dev/ttyACM0', 9600, timeout=0.050)
ser.close()
ser.open()
FPS = 28 #image updating frequency
fpsClock = pygame.time.Clock()
imgCounter = 0 #counter variable for numbering still frames taken during mission
width, height = 900, 600
picWidth, picHeight = 600, 400 #previously 400, 250 was used
##sensor variables, internally and externally of the ROV hull
tempROV = senseHat.get_temperature()
pressureROV = senseHat.get_pressure()
bearingROV = senseHat.get_compass() #gives compass bearing in degrees from North
humidityROV = senseHat.get_humidity()
orientationROV = senseHat.get_orientation() #a dict containing values with tags 'pitch', 'roll', 'yaw'
batteryVoltage = 0
BATTVOLTCALIBRATION = 0.27
tempWater = 0
pressureWater = 0
motorStates = "000" #each motor [i = 0, 1, 2] has 3 states {off = 0, dir1 = 1, dir2 = 2}
##input keys, the ROV drives like a tank. Forwards and reverse on each side to turn
RISING = pygame.K_w
DIVING = pygame.K_s
PORT_GO = pygame.K_q
PORT_BACK = pygame.K_a
STARBOARD_GO = pygame.K_e
STARBOARD_BACK = pygame.K_d
PICTURE = pygame.K_SPACE
##motor control parameters
motorStates = [0, 0, 0] #dive/rise, portRev/portFwd, strbdRev/strbdFwd
currentState = "000" #motorStates converted to string variable for serial message to arduino
lastState = "000"
##color definitions
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (85, 151, 255)
##import GUI elements (sprites), setup main screen
screen = pygame.display.set_mode((900, 600), 0)
#screen = pygame.display.set_mode((900, 600), pygame.FULLSCREEN)
##camera feed setup
cam_list = pygame.camera.list_cameras()
cam = pygame.camera.Camera(cam_list[0], (picWidth, picHeight))
cam.start()
##function definitions
def text(content, xPos, yPos):
#string text to display, x and y position of the text to be displayed
font = pygame.font.Font(None, 24)
text = font.render(str(content), 1, (255, 255, 255))
screen.blit(text, (xPos, yPos))
def printState(array):
#returns the string to be sent to the Arduino indicating motor states
msg = ""
for val in array:
msg += str(val)
print(msg) #print motor states to terminal for debugging/verification
return(msg)
#main while loop
while True:
##update camera feed with a new image
streamImage = cam.get_image()
#streamImage = pygame.transform.scale(streamImage, (900, 600))
screen.blit(streamImage, (150, 100)) #draw the camera feed inside the GUI frame
#screen.blit(streamImage, (0, 0)) #the camera feed now covers the whole window
##read sensor data
#receive serial message with sensor data from Arduino
serialInput = " "
if ser.inWaiting():
serialInput = ser.readline()
#print(serialInput)
if serialInput.count(':') == 2: #check that the message is complete
tempWater, pressureWater, batteryVoltage = serialInput.split(':')
#print(batteryVoltage)
tempWater = round(float(tempWater), 1)
#pressureWater = round(pressureWater, 1)
batteryVoltage = round(float(batteryVoltage) - BATTVOLTCALIBRATION, 1)
#update sensor values from the onboard senseHat
#add conversions to water depth and atmospheric pressure
tempROV = round(senseHat.get_temperature(), 1)
pressureROV = round((senseHat.get_pressure() / 10), 1)
bearingROV = senseHat.get_compass()
humidityROV = senseHat.get_humidity()
orientationROV = senseHat.get_orientation()
#write sensorvalues to screen
text((str(batteryVoltage) + "V"), 750, 20)
text("min voltage: 7.5V", 750, 40)
text("Outside ROV:", 20, 120)
text((str(tempWater) + " C"), 20, 145)
text((str(pressureWater) + " kPa"), 20, 170)
text("Inside ROV:", 750, 120)
text((str(tempROV) + " C"), 750, 145)
text((str(pressureROV) + " kPa"), 750, 170)
##read keyboard input
for event in pygame.event.get():
#user closes the streaming window
if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):
cam.stop()
ser.close()
pygame.quit()
sys.exit()
#user presses a motor control key
if event.type == pygame.KEYDOWN and event.key == pygame.K_a:
motorStates[1] = 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_q:
motorStates[1] = 2
if event.type == pygame.KEYDOWN and event.key == pygame.K_w:
motorStates[0] = 1
if event.type == pygame.KEYDOWN and event.key == pygame.K_s:
motorStates[0] = 2
if event.type == pygame.KEYDOWN and event.key == pygame.K_e:
motorStates[2] = 2
if event.type == pygame.KEYDOWN and event.key == pygame.K_d:
motorStates[2] = 1
if event.type == pygame.KEYUP and event.key == pygame.K_a:
motorStates[1] = 0
if event.type == pygame.KEYUP and event.key == pygame.K_q:
motorStates[1] = 0
if event.type == pygame.KEYUP and event.key == pygame.K_w:
motorStates[0] = 0
if event.type == pygame.KEYUP and event.key == pygame.K_s:
motorStates[0] = 0
if event.type == pygame.KEYUP and event.key == pygame.K_e:
motorStates[2] = 0
if event.type == pygame.KEYUP and event.key == pygame.K_d:
motorStates[2] = 0
currentState = printState(motorStates)
if currentState != lastState:
#send the new motor states if they change from last loop
lastState = currentState
ser.write(currentState)
print("Sent commands") #verify that commands have been sent
##update the whole screen image
pygame.display.flip()
fpsClock.tick(FPS)
screen.fill(BLUE)
| gpl-3.0 |
adobe-type-tools/python-scripts | buildAll.py | 1 | 3612 | #!/usr/bin/env python
import os
import sys
import time
from subprocess import Popen, PIPE
__doc__ = """
buildAll v1.2 - Dec 01 2019
This script takes a path to a folder as input, finds all UFO files or Type 1
fonts (.pfa files) inside that folder and its subdirectories, and builds
OpenType (.otf) fonts using the FDK's makeotf tool.
If a path is not provided, the script will use the current path as the topmost
directory.
The script ignores Multiple Master PFA fonts, usually named 'mmfont.pfa'.
The Type 1 fonts can also be in plain text format (.txt) where the Private
and CharStrings dictionaries are not encrypted. These files can be created
by using the FDK's detype1 tool.
==================================================
Versions:
v1.0 - Feb 22 2013 - Initial release
v1.1 - Aug 04 2013 - Added support for UFO files
v1.2 - Dec 01 2019 - Python 3
"""
kFontProjFile = "current.fpr"
kFontTXT = "font.txt"
def getFontPaths(path):
fontsList = []
for r, folders, files in os.walk(os.path.realpath(path)):
fileAndFolderList = folders[:]
fileAndFolderList.extend(files)
for item in fileAndFolderList:
fileName, extension = os.path.splitext(item)
extension = extension.lower()
if extension == ".pfa" and not fileName == "mmfont":
fontsList.append(os.path.join(r, item))
elif extension == ".txt" and fileName == "font":
fontsList.append(os.path.join(r, item))
elif extension == ".ufo":
fontsList.append(os.path.join(r, item))
else:
continue
return fontsList
def doTask(fonts):
totalFonts = len(fonts)
print("%d fonts found\n" % totalFonts)
i = 1
for font in fonts:
folderPath, fontFileName = os.path.split(font)
styleName = os.path.basename(folderPath)
# Change current directory to the folder where the font is contained
os.chdir(folderPath)
print('*******************************')
print('Building %s...(%d/%d)' % (styleName, i, totalFonts))
cmd = 'makeotf -f "%s" -gs -r' % fontFileName # -gs option: only the glyphs listed in the GOADB file will be included in OTF
# cmd = 'makeotf -f "%s" -addn -r' % fontFileName # adds marking notdef glyph
popen = Popen(cmd, shell=True, stdout=PIPE)
popenout, popenerr = popen.communicate()
if popenout:
print(popenout.decode('utf-8'))
if popenerr:
print(popenerr.decode('utf-8'))
i += 1
# Delete project file
if os.path.exists(kFontProjFile):
os.remove(kFontProjFile)
def run():
# if a path is provided
if len(sys.argv[1:]):
baseFolderPath = sys.argv[1]
if baseFolderPath[-1] == '/': # remove last slash if present
baseFolderPath = baseFolderPath[:-1]
# make sure the path is valid
if not os.path.isdir(baseFolderPath):
print('Invalid directory.')
return
# if a path is not provided, use the current directory
else:
baseFolderPath = os.getcwd()
t1 = time.time()
fontsList = getFontPaths(baseFolderPath)
if len(fontsList):
doTask(fontsList)
else:
print("No fonts found")
return
t2 = time.time()
elapsedSeconds = t2 - t1
elapsedMinutes = elapsedSeconds / 60
if elapsedMinutes < 1:
print('Completed in %.1f seconds.' % elapsedSeconds)
else:
print('Completed in %.1f minutes.' % elapsedMinutes)
if __name__=='__main__':
run()
| mit |
r888888888/models | street/python/vgsl_eval.py | 22 | 2048 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model eval separate from training."""
from tensorflow import app
from tensorflow.python.platform import flags
import vgsl_model
flags.DEFINE_string('eval_dir', '/tmp/mdir/eval',
'Directory where to write event logs.')
flags.DEFINE_string('graph_def_file', None,
'Output eval graph definition file.')
flags.DEFINE_string('train_dir', '/tmp/mdir',
'Directory where to find training checkpoints.')
flags.DEFINE_string('model_str',
'1,150,600,3[S2(4x150)0,2 Ct5,5,16 Mp2,2 Ct5,5,64 Mp3,3'
'([Lrys64 Lbx128][Lbys64 Lbx128][Lfys64 Lbx128])S3(3x0)2,3'
'Lfx128 Lrx128 S0(1x4)0,3 Do Lfx256]O1c134',
'Network description.')
flags.DEFINE_integer('num_steps', 1000, 'Number of steps to run evaluation.')
flags.DEFINE_integer('eval_interval_secs', 60,
'Time interval between eval runs.')
flags.DEFINE_string('eval_data', None, 'Evaluation data filepattern')
flags.DEFINE_string('decoder', None, 'Charset decoder')
FLAGS = flags.FLAGS
def main(argv):
del argv
vgsl_model.Eval(FLAGS.train_dir, FLAGS.eval_dir, FLAGS.model_str,
FLAGS.eval_data, FLAGS.decoder, FLAGS.num_steps,
FLAGS.graph_def_file, FLAGS.eval_interval_secs)
if __name__ == '__main__':
app.run()
| apache-2.0 |
enriclluelles/ansible-modules-extras | packaging/os/swdepot.py | 8 | 6163 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Raul Melo
# Written by Raul Melo <[email protected]>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
import pipes
DOCUMENTATION = '''
---
module: swdepot
short_description: Manage packages with swdepot package manager (HP-UX)
description:
- Will install, upgrade and remove packages with swdepot package manager (HP-UX)
version_added: "1.4"
notes: []
author: '"Raul Melo (@melodous)" <[email protected]>'
options:
name:
description:
- package name.
required: true
default: null
choices: []
aliases: []
version_added: 1.4
state:
description:
- whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
default: null
choices: [ 'present', 'latest', 'absent']
aliases: []
version_added: 1.4
depot:
description:
- The source repository from which install or upgrade a package.
required: false
default: null
choices: []
aliases: []
version_added: 1.4
'''
EXAMPLES = '''
- swdepot: name=unzip-6.0 state=installed depot=repository:/path
- swdepot: name=unzip state=latest depot=repository:/path
- swdepot: name=unzip state=absent
'''
def compare_package(version1, version2):
""" Compare version packages.
Return values:
-1 first minor
0 equal
1 fisrt greater """
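# Illustrative examples (not from the original module), assuming dotted
# integer version strings as handled by normalize() below:
# compare_package('6.0', '6.0') -> 0
# compare_package('6.0', '6.0.1') -> -1
# compare_package('6.1', '6.0.9') -> 1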
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
return cmp(normalize(version1), normalize(version2))
def query_package(module, name, depot=None):
""" Returns whether a package is installed or not and version. """
cmd_list = '/usr/sbin/swlist -a revision -l product'
if depot:
rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
else:
rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1]
else:
version = None
return rc, version
def remove_package(module, name):
""" Uninstall package if installed. """
cmd_remove = '/usr/sbin/swremove'
rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
if rc == 0:
return rc, stdout
else:
return rc, stderr
def install_package(module, depot, name):
""" Install package if not already installed """
cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
if rc == 0:
return rc, stdout
else:
return rc, stderr
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg'], required=True),
state = dict(choices=['present', 'absent', 'latest'], required=True),
depot = dict(default=None, required=False)
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
depot = module.params['depot']
changed = False
msg = "No changed"
rc = 0
if ( state == 'present' or state == 'latest' ) and depot == None:
output = "depot parameter is mandatory in present or latest task"
module.fail_json(name=name, msg=output, rc=rc)
#Check local version
rc, version_installed = query_package(module, name)
if not rc:
installed = True
msg = "Already installed"
else:
installed = False
if ( state == 'present' or state == 'latest' ) and installed == False:
if module.check_mode:
module.exit_json(changed=True)
rc, output = install_package(module, depot, name)
if not rc:
changed = True
msg = "Packaged installed"
else:
module.fail_json(name=name, msg=output, rc=rc)
elif state == 'latest' and installed == True:
#Check depot version
rc, version_depot = query_package(module, name, depot)
if not rc:
if compare_package(version_installed,version_depot) == -1:
if module.check_mode:
module.exit_json(changed=True)
#Install new version
rc, output = install_package(module, depot, name)
if not rc:
msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot
changed = True
else:
module.fail_json(name=name, msg=output, rc=rc)
else:
output = "Software package not in repository " + depot
module.fail_json(name=name, msg=output, rc=rc)
elif state == 'absent' and installed == True:
if module.check_mode:
module.exit_json(changed=True)
rc, output = remove_package(module, name)
if not rc:
changed = True
msg = "Package removed"
else:
module.fail_json(name=name, msg=output, rc=rc)
if module.check_mode:
module.exit_json(changed=False)
module.exit_json(changed=changed, name=name, state=state, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
kawamon/hue | desktop/core/ext-py/billiard-3.5.0.5/billiard/resource_sharer.py | 9 | 5361 | #
# We use a background thread for sharing fds on Unix, and for sharing
# sockets on Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
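#
# Illustrative flow (a sketch, not part of the original module) for the Unix
# DupFd wrapper defined below; everything except DupFd/detach is assumed:
#
#   wrapper = DupFd(fd)        # parent: register the fd, get a picklable wrapper
#   ...                        # pickle `wrapper` and hand it to another process
#   fd2 = wrapper.detach()     # receiver: connect to the sharer and obtain the fd
#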
from __future__ import absolute_import
import os
import signal
import socket
import sys
import threading
from . import process
from . import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
| apache-2.0 |
kwlzn/pants | src/python/pants/goal/goal.py | 4 | 8317 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.goal.error import GoalError
from pants.option.optionable import Optionable
class Goal(object):
"""Factory for objects representing goals.
Ensures that we have exactly one instance per goal name.
:API: public
"""
_goal_by_name = dict()
def __new__(cls, *args, **kwargs):
raise TypeError('Do not instantiate {0}. Call by_name() instead.'.format(cls))
@classmethod
def register(cls, name, description):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
"""
cls.by_name(name)._description = description
@classmethod
def by_name(cls, name):
"""Returns the unique object representing the goal of the specified name.
:API: public
"""
if name not in cls._goal_by_name:
cls._goal_by_name[name] = _Goal(name)
return cls._goal_by_name[name]
@classmethod
def clear(cls):
"""Remove all goals and tasks.
This method is EXCLUSIVELY for use in tests and during pantsd startup.
:API: public
"""
cls._goal_by_name.clear()
@staticmethod
def scope(goal_name, task_name):
"""Returns options scope for specified task in specified goal.
:API: public
"""
return goal_name if goal_name == task_name else '{0}.{1}'.format(goal_name, task_name)
@staticmethod
def all():
"""Returns all registered goals, sorted alphabetically by name.
:API: public
"""
return [pair[1] for pair in sorted(Goal._goal_by_name.items())]
@classmethod
def subsystems(cls):
"""Returns all subsystem types used by all tasks, in no particular order.
:API: public
"""
ret = set()
for goal in cls.all():
ret.update(goal.subsystems())
return ret
class _Goal(object):
def __init__(self, name):
"""Don't call this directly.
Create goals only through the Goal.by_name() factory.
"""
Optionable.validate_scope_name_component(name)
self.name = name
self._description = ''
self.serialize = False
self._task_type_by_name = {} # name -> Task subclass.
self._ordered_task_names = [] # The task names, in the order imposed by registration.
@property
def description(self):
if self._description:
return self._description
# Return the docstring for the Task registered under the same name as this goal, if any.
# This is a very common case, and therefore a useful idiom.
namesake_task = self._task_type_by_name.get(self.name)
if namesake_task and namesake_task.__doc__:
# First line of docstring.
# TODO: This is repetitive of Optionable.get_description(). We should probably just
# make Goal an Optionable, for uniformity.
return namesake_task.__doc__.partition('\n')[0].strip()
return ''
def register_options(self, options):
for task_type in sorted(self.task_types(), key=lambda cls: cls.options_scope):
task_type.register_options_on_scope(options)
def install(self, task_registrar, first=False, replace=False, before=None, after=None):
"""Installs the given task in this goal.
The placement of the task in this goal's execution list defaults to the end but its position
can be influenced by specifying exactly one of the following arguments:
first: Places the task 1st in the execution list.
replace: Removes all existing tasks in this goal and installs this task.
before: Places the task before the named task in the execution list.
after: Places the task after the named task in the execution list.
:API: public
"""
if [bool(place) for place in [first, replace, before, after]].count(True) > 1:
raise GoalError('Can only specify one of first, replace, before or after')
task_name = task_registrar.name
Optionable.validate_scope_name_component(task_name)
options_scope = Goal.scope(self.name, task_name)
# Currently we need to support registering the same task type multiple times in different
# scopes. However we still want to have each task class know the options scope it was
# registered in. So we create a synthetic subclass here.
# TODO(benjy): Revisit this when we revisit the task lifecycle. We probably want to have
# a task *instance* know its scope, but this means converting option registration from
# a class method to an instance method, and instantiating the task much sooner in the
# lifecycle.
superclass = task_registrar.task_type
subclass_name = b'{0}_{1}'.format(superclass.__name__,
options_scope.replace('.', '_').replace('-', '_'))
task_type = type(subclass_name, (superclass,), {
'__doc__': superclass.__doc__,
'__module__': superclass.__module__,
'options_scope': options_scope,
'_stable_name': superclass.stable_name()
})
otn = self._ordered_task_names
if replace:
for tt in self.task_types():
tt.options_scope = None
del otn[:]
self._task_type_by_name = {}
if first:
otn.insert(0, task_name)
elif before in otn:
otn.insert(otn.index(before), task_name)
elif after in otn:
otn.insert(otn.index(after) + 1, task_name)
else:
otn.append(task_name)
self._task_type_by_name[task_name] = task_type
if task_registrar.serialize:
self.serialize = True
return self
def uninstall_task(self, name):
"""Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
:API: public
"""
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError('Cannot uninstall unknown task: {0}'.format(name))
def known_scope_infos(self):
"""Yields ScopeInfos for all known scopes under this goal."""
# Note that we don't yield the goal's own scope. We don't need it (as we don't register
# options on it), and it's needlessly confusing when a task has the same name as its goal,
# in which case we shorten its scope to the goal's scope (e.g., idea.idea -> idea).
for task_type in self.task_types():
for scope_info in task_type.known_scope_infos():
yield scope_info
def subsystems(self):
"""Returns all subsystem types used by tasks in this goal, in no particular order."""
ret = set()
for task_type in self.task_types():
ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
return ret
def ordered_task_names(self):
"""The task names in this goal, in registration order."""
return self._ordered_task_names
def task_type_by_name(self, name):
"""The task type registered under the given name."""
return self._task_type_by_name[name]
def task_types(self):
"""Returns the task types in this goal, unordered."""
return self._task_type_by_name.values()
def task_items(self):
for name, task_type in self._task_type_by_name.items():
yield name, task_type
def has_task_of_type(self, typ):
"""Returns True if this goal has a task of the given type (or a subtype of it)."""
for task_type in self.task_types():
if issubclass(task_type, typ):
return True
return False
def __repr__(self):
return self.name
| apache-2.0 |
chrisfilda/edx_platform | lms/djangoapps/instructor/views/api_urls.py | 7 | 2854 | """
Instructor API endpoint urls.
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('', # nopep8
url(r'^students_update_enrollment$',
'instructor.views.api.students_update_enrollment', name="students_update_enrollment"),
url(r'^list_course_role_members$',
'instructor.views.api.list_course_role_members', name="list_course_role_members"),
url(r'^modify_access$',
'instructor.views.api.modify_access', name="modify_access"),
url(r'^bulk_beta_modify_access$',
'instructor.views.api.bulk_beta_modify_access', name="bulk_beta_modify_access"),
url(r'^get_grading_config$',
'instructor.views.api.get_grading_config', name="get_grading_config"),
url(r'^get_students_features(?P<csv>/csv)?$',
'instructor.views.api.get_students_features', name="get_students_features"),
url(r'^get_anon_ids$',
'instructor.views.api.get_anon_ids', name="get_anon_ids"),
url(r'^get_distribution$',
'instructor.views.api.get_distribution', name="get_distribution"),
url(r'^get_student_progress_url$',
'instructor.views.api.get_student_progress_url', name="get_student_progress_url"),
url(r'^reset_student_attempts$',
'instructor.views.api.reset_student_attempts', name="reset_student_attempts"),
url(r'^rescore_problem$',
'instructor.views.api.rescore_problem', name="rescore_problem"),
url(r'^list_instructor_tasks$',
'instructor.views.api.list_instructor_tasks', name="list_instructor_tasks"),
url(r'^list_background_email_tasks$',
'instructor.views.api.list_background_email_tasks', name="list_background_email_tasks"),
url(r'^list_forum_members$',
'instructor.views.api.list_forum_members', name="list_forum_members"),
url(r'^update_forum_role_membership$',
'instructor.views.api.update_forum_role_membership', name="update_forum_role_membership"),
url(r'^proxy_legacy_analytics$',
'instructor.views.api.proxy_legacy_analytics', name="proxy_legacy_analytics"),
url(r'^send_email$',
'instructor.views.api.send_email', name="send_email"),
url(r'^change_due_date$', 'instructor.views.api.change_due_date',
name='change_due_date'),
url(r'^reset_due_date$', 'instructor.views.api.reset_due_date',
name='reset_due_date'),
url(r'^show_unit_extensions$', 'instructor.views.api.show_unit_extensions',
name='show_unit_extensions'),
url(r'^show_student_extensions$', 'instructor.views.api.show_student_extensions',
name='show_student_extensions'),
# Grade downloads...
url(r'^list_report_downloads$',
'instructor.views.api.list_report_downloads', name="list_report_downloads"),
url(r'calculate_grades_csv$',
'instructor.views.api.calculate_grades_csv', name="calculate_grades_csv"),
)
| agpl-3.0 |