Dataset schema (one row per source file):

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5 to 100 | stringlengths 4 to 299 | stringclasses 990 values | stringlengths 4 to 7 | stringlengths 666 to 1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17 to 100 | int64 7 to 1k | float64 0.25 to 0.98 | bool 1 class
# ==== surgebiswas/poker :: PokerBots_2017/Johnny/theano/tensor/nnet/ConvGrad3D.py | copies: 6 | size: 11911 | license: mit ====
from six.moves import xrange
import numpy as N
import theano
from theano.tensor import basic as T
from theano.misc import strutil
from theano.gradient import grad_undefined
from theano.gradient import DisconnectedType
# TODO: speed up by reordering loops. Should pass through the videos once, incrementing all weight gradients, rather
# than visiting each weight gradient element once and passing through whole video
class ConvGrad3D(theano.Op):
"""
Gradient of Conv3D with respect to W.
"""
__props__ = ()
def c_code_cache_version(self):
return (1,)
def make_node(self, V, d, WShape, dCdH):
V_ = T.as_tensor_variable(V)
d_ = T.as_tensor_variable(d)
WShape_ = T.as_tensor_variable(WShape)
dCdH_ = T.as_tensor_variable(dCdH)
return theano.Apply(self,
inputs=[V_, d_, WShape_, dCdH_],
outputs=[T.TensorType(
V_.dtype,
(False, False, False, False, False))()])
def infer_shape(self, node, input_shapes):
V, d, W_shape, dCdH = node.inputs
return [(W_shape[0], W_shape[1], W_shape[2], W_shape[3], W_shape[4])]
def connection_pattern(self, node):
return [[True], [True], [False], [True]]
def grad(self, inputs, output_gradients):
C, d, WShape, B = inputs
dLdA, = output_gradients
z = T.zeros_like(C[0, 0, 0, 0, :])
dLdC = theano.tensor.nnet.convTransp3D(dLdA, z, d, B, C.shape[1:4])
# d actually does affect the outputs, so it's not disconnected
dLdd = grad_undefined(self, 1, d)
# The shape of the weights doesn't affect the output elements
dLdWShape = DisconnectedType()()
dLdB = theano.tensor.nnet.conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)
return [dLdC, dLdd, dLdWShape, dLdB]
def perform(self, node, inputs, output_storage):
V, d, WShape, dCdH = inputs
# print "ConvGradW3D python code"
# partial C / partial W[j,z,k,l,m] = sum_i sum_p sum_q sum_r (partial C /partial H[i,j,p,q,r] ) * V[i,z,dr*p+k,dc*q+l,dt*r+m]
batchSize = dCdH.shape[0]
outputHeight = dCdH.shape[1]
outputWidth = dCdH.shape[2]
outputDur = dCdH.shape[3]
assert V.shape[0] == batchSize
dr, dc, dt = d
dCdW = N.zeros(WShape, dtype=V.dtype)
# print 'computing output of shape '+str(WShape)
for k in xrange(0, WShape[1]):
for l in xrange(0, WShape[2]):
for m in xrange(0, WShape[3]):
for i in xrange(0, batchSize):
for p in xrange(0, outputHeight):
for q in xrange(0, outputWidth):
for r in xrange(0, outputDur):
for j in xrange(0, WShape[0]):
for z in xrange(0, WShape[4]):
dCdW[j, k, l, m, z] += (
dCdH[i, p, q, r, j] *
V[i, dr * p + k, dc * q + l,
dt * r + m, z])
output_storage[0][0] = dCdW
def c_code(self, node, nodename, inputs, outputs, sub):
V, d, WShape, dCdH = inputs
fail = sub['fail']
dCdW = outputs[0]
codeSource = """
///////////// < code generated by ConvGradW3D >
//printf("\t\t\t\tConvGradW3D c code\\n");
//Check dimensionality of inputs
if (PyArray_NDIM(%(dCdH)s) != 5)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: dCdH must be a 5 dimensional tensor");
%(fail)s
}
if (PyArray_NDIM(%(V)s) != 5)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: V must be a 5 dimensional tensor");
%(fail)s
}
if (PyArray_NDIM(%(WShape)s) != 1)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: WShape must be a vector.");
%(fail)s
}
if (PyArray_NDIM(%(d)s) != 1)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: d must be a vector.");
%(fail)s
}
if (PyArray_DIMS(%(d)s)[0] != 3)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: 3 stride length arguments expected (row, col, time) but %%li were given", (long)PyArray_DIMS(%(d)s)[0]);
%(fail)s
}
{ //extra scope so that fail will not jump over declarations
//Read and check sizes of inputs
const int batchSize = PyArray_DIMS(%(V)s)[0];
if (PyArray_DIMS(%(WShape)s)[0] != 5)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: WShape must specify a 5D shape");
%(fail)s
}
if (!PyArray_ISCONTIGUOUS(%(WShape)s))
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: WShape must be contiguous");
%(fail)s
}
{ //extra scope so that fail will not jump over declarations
dtype_%(WShape)s * WShape = (dtype_%(WShape)s *) PyArray_DATA(%(WShape)s);
const int outputChannels = WShape[0];
const int inputChannels = PyArray_DIMS(%(V)s)[4];
if (WShape[4] != inputChannels)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: W operates on a %%i channel image but the image has %%i channels",(int) WShape[1],inputChannels);
%(fail)s
}
{ //extra scope so fail works
const int filterHeight = WShape[1];
const int filterWidth = WShape[2];
const int filterDur = WShape[3];
const int vidHeight = PyArray_DIMS(%(V)s)[1];
const int vidWidth = PyArray_DIMS(%(V)s)[2];
const int vidDur = PyArray_DIMS(%(V)s)[3];
if (vidHeight < filterHeight)
{
PyErr_Format(PyExc_ValueError, "ConvGrad3D: W has a height of %%i but V is only %%i pixels tall", filterHeight, vidHeight);
%(fail)s
}
if (vidWidth < filterWidth)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: W has a width of %%i but V is only %%i pixels tall",filterWidth,vidWidth);
%(fail)s
}
if (vidDur < filterDur)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: W has a duration of %%i but V is only %%i pixels long",filterDur,vidDur);
%(fail)s
}
{ // extra scope so fail works
//Read and check stride arguments
const int dr = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,0);
const int dc = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,1);
const int dt = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,2);
if (dr <= 0 || dc <= 0 || dt <= 0)
{
PyErr_Format(PyExc_ValueError,"ConvGrad3D: Strides should all be positive but they are %%i, %%i, %%i",dr,dc,dt);
%(fail)s
}
{ // extra scope so fail works
//Compute correct sized of output
const int outputHeight = int( (vidHeight - filterHeight) / dr )+1;
const int outputWidth = int( (vidWidth - filterWidth) / dc )+1;
const int outputDur = int( (vidDur - filterDur) / dt ) +1;
if (PyArray_DIMS(%(dCdH)s)[0] != batchSize ||
PyArray_DIMS(%(dCdH)s)[4] != outputChannels ||
PyArray_DIMS(%(dCdH)s)[1] != outputHeight ||
PyArray_DIMS(%(dCdH)s)[2] != outputWidth ||
PyArray_DIMS(%(dCdH)s)[3] != outputDur)
{
PyErr_Format(PyExc_ValueError, "dCdH is the wrong size, expected (%%i,%%i,%%i,%%i,%%i), got (%%li,%%li,%%li,%%li,%%li)", batchSize, outputHeight, outputWidth, outputDur, outputChannels, (long)PyArray_DIMS(%(dCdH)s)[0], (long)PyArray_DIMS(%(dCdH)s)[1], (long)PyArray_DIMS(%(dCdH)s)[2], (long)PyArray_DIMS(%(dCdH)s)[3], (long)PyArray_DIMS(%(dCdH)s)[4]);
%(fail)s
}
{ // extra scope for fail
npy_intp dims[5];
dims[0] = outputChannels;
dims[4] = inputChannels;
dims[1] = filterHeight;
dims[2] = filterWidth;
dims[3] = filterDur;
if(!(%(dCdW)s) || PyArray_DIMS(%(dCdW)s)[0]!=dims[0] ||
PyArray_DIMS(%(dCdW)s)[1]!=dims[1] ||
PyArray_DIMS(%(dCdW)s)[2]!=dims[2] ||
PyArray_DIMS(%(dCdW)s)[3]!=dims[3] ||
PyArray_DIMS(%(dCdW)s)[4]!=dims[4] ){
Py_XDECREF(%(dCdW)s);
%(dCdW)s = (PyArrayObject *) PyArray_SimpleNew(5, dims, PyArray_DESCR(%(V)s)->type_num);
if (!(%(dCdW)s)) {
PyErr_Format(PyExc_MemoryError,"ConvGrad3D: Could not allocate dCdW");
%(fail)s
}
}
{ //extra scope so fail works
#define ELEM5(x, i,j,k,l,m) * ( dtype_ ## x *) ( PyArray_BYTES(x) + (i)*PyArray_STRIDES(x)[0]+(j)*PyArray_STRIDES(x)[1]+(k)*PyArray_STRIDES(x)[2]+(l)*PyArray_STRIDES(x)[3]+(m)*PyArray_STRIDES(x)[4] )
#define ELEM_AT(x, i) * ( dtype_ ## x *) ( PyArray_BYTES(x) + (i) )
const int dhs3 = PyArray_STRIDES(%(dCdH)s)[3];
const int dtvs3 = dt * PyArray_STRIDES(%(V)s)[3];
// Compute dCdW
//TODO-- see if this can be made faster by using ELEM_AT instead of ELEM5
// dCdW[j,k,l,m,z] = sum_i sum_p sum_q sum_r dCdH[i,p,q,r,j] * V[i,dr*p+k,dc*q+l,dt*r+m,z]
for (int j = 0; j < outputChannels; j++) {
for (int z = 0; z < inputChannels; z++) {
for (int k = 0; k < filterHeight; k++) {
for (int l = 0; l < filterWidth; l++) {
for (int m = 0; m < filterDur; m++) {
//printf("writePos %%i %%i %%i %%i %%i \\n",j,k,l,m,z);
dtype_%(dCdW)s & writePos = ELEM5(%(dCdW)s, j,k,l,m,z);
writePos = 0;
for (int i = 0; i < batchSize; i++) {
for (int p = 0; p < outputHeight; p++) {
for (int q = 0; q < outputWidth; q++) {
int Hpos = i * PyArray_STRIDES(%(dCdH)s)[0] + j * PyArray_STRIDES(%(dCdH)s)[4] + p * PyArray_STRIDES(%(dCdH)s)[1] + q * PyArray_STRIDES(%(dCdH)s)[2] ;
int Vpos = i * PyArray_STRIDES(%(V)s)[0] + z * PyArray_STRIDES(%(V)s)[4] + (dr * p+k) * PyArray_STRIDES(%(V)s)[1] + (dc*q+l) * PyArray_STRIDES(%(V)s)[2] + m * PyArray_STRIDES(%(V)s)[3];
for (int r = 0; r < outputDur; r++) {
writePos += ELEM5(%(dCdH)s,i,p,q,r,j) * ELEM5(%(V)s,i,dr*p+k,dc*q+l,dt*r+m,z);
//writePos += ELEM_AT(%(dCdH)s,Hpos) * ELEM_AT(%(V)s,Vpos);
Hpos += dhs3;
Vpos += dtvs3;
}
}
}
}
}
}
}
}
}
}}}}}}} // extra scope for fail
///////////// < /code generated by ConvGradW3D >
"""
return strutil.render_string(codeSource, locals())
convGrad3D = ConvGrad3D()
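# --- Editorial usage sketch, not part of the original module. It assumes a
# --- working legacy Theano install; the names V, d, WShape and dCdH mirror the
# --- make_node() signature above, and all shapes/values are illustrative only.
if __name__ == "__main__":
    import theano.tensor as TT
    V = TT.TensorType('float64', (False,) * 5)('V')         # (batch, row, col, time, channel)
    dCdH = TT.TensorType('float64', (False,) * 5)('dCdH')   # (batch, out_row, out_col, out_time, filter)
    d = TT.ivector('d')                                      # strides (dr, dc, dt)
    WShape = TT.ivector('WShape')                            # (n_filters, k_row, k_col, k_time, n_channels)
    dCdW = convGrad3D(V, d, WShape, dCdH)                    # symbolic gradient of the cost w.r.t. W
    f = theano.function([V, d, WShape, dCdH], dCdW)          # compiles the graph (perform()/c_code above)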
# ==== glycerine/goq :: vendor/git.apache.org/thrift.git/tutorial/py/PythonServer.py | copies: 12 | size: 3107 | license: bsd-2-clause ====
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import sys
sys.path.append('gen-py')
sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])
from tutorial import Calculator
from tutorial.ttypes import InvalidOperation, Operation
from shared.ttypes import SharedStruct
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
class CalculatorHandler:
def __init__(self):
self.log = {}
def ping(self):
print('ping()')
def add(self, n1, n2):
print('add(%d,%d)' % (n1, n2))
return n1 + n2
def calculate(self, logid, work):
print('calculate(%d, %r)' % (logid, work))
if work.op == Operation.ADD:
val = work.num1 + work.num2
elif work.op == Operation.SUBTRACT:
val = work.num1 - work.num2
elif work.op == Operation.MULTIPLY:
val = work.num1 * work.num2
elif work.op == Operation.DIVIDE:
if work.num2 == 0:
x = InvalidOperation()
x.whatOp = work.op
x.why = 'Cannot divide by 0'
raise x
val = work.num1 / work.num2
else:
x = InvalidOperation()
x.whatOp = work.op
x.why = 'Invalid operation'
raise x
log = SharedStruct()
log.key = logid
log.value = '%d' % (val)
self.log[logid] = log
return val
def getStruct(self, key):
print('getStruct(%d)' % (key))
return self.log[key]
def zip(self):
print('zip()')
if __name__ == '__main__':
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
transport = TSocket.TServerSocket(host='127.0.0.1', port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
# You could do one of these for a multithreaded server
# server = TServer.TThreadedServer(
# processor, transport, tfactory, pfactory)
# server = TServer.TThreadPoolServer(
# processor, transport, tfactory, pfactory)
print('Starting the server...')
server.serve()
print('done.')
# ==== indictranstech/phr-frappe :: frappe/modules/patch_handler.py | copies: 5 | size: 2956 | license: mit ====
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Execute Patch Files
To run directly
python lib/wnf.py patch patch1, patch2 etc
python lib/wnf.py patch -f patch1, patch2 etc
where patch1, patch2 is module name
"""
import frappe, os
class PatchError(Exception): pass
def run_all():
"""run all pending patches"""
executed = [p[0] for p in frappe.db.sql("""select patch from `tabPatch Log`""")]
for patch in get_all_patches():
if patch and (patch not in executed):
if not run_single(patchmodule = patch):
log(patch + ': failed: STOPPED')
raise PatchError(patch)
def get_all_patches():
patches = []
for app in frappe.get_installed_apps():
# 3-to-4 fix
if app=="webnotes":
app="frappe"
patches.extend(frappe.get_file_items(frappe.get_pymodule_path(app, "patches.txt")))
return patches
def reload_doc(args):
import frappe.modules
run_single(method = frappe.modules.reload_doc, methodargs = args)
def run_single(patchmodule=None, method=None, methodargs=None, force=False):
from frappe import conf
# don't write txt files
conf.developer_mode = 0
if force or method or not executed(patchmodule):
return execute_patch(patchmodule, method, methodargs)
else:
return True
def execute_patch(patchmodule, method=None, methodargs=None):
"""execute the patch"""
block_user(True)
frappe.db.begin()
try:
log('Executing {patch} in {site} ({db})'.format(patch=patchmodule or str(methodargs),
site=frappe.local.site, db=frappe.db.cur_db_name))
if patchmodule:
if patchmodule.startswith("execute:"):
exec patchmodule.split("execute:")[1] in globals()
else:
frappe.get_attr(patchmodule.split()[0] + ".execute")()
update_patch_log(patchmodule)
elif method:
method(**methodargs)
except Exception:
frappe.db.rollback()
raise
else:
frappe.db.commit()
block_user(False)
log('Success')
return True
def update_patch_log(patchmodule):
"""update patch_file in patch log"""
frappe.get_doc({"doctype": "Patch Log", "patch": patchmodule}).insert()
def executed(patchmodule):
"""return True if is executed"""
done = frappe.db.get_value("Patch Log", {"patch": patchmodule})
# if done:
# print "Patch %s already executed in %s" % (patchmodule, frappe.db.cur_db_name)
return done
def block_user(block):
"""stop/start execution till patch is run"""
frappe.local.flags.in_patch = block
frappe.db.begin()
msg = "Patches are being executed in the system. Please try again in a few moments."
frappe.db.set_global('__session_status', block and 'stop' or None)
frappe.db.set_global('__session_status_message', block and msg or None)
frappe.db.commit()
def check_session_stopped():
if frappe.db.get_global("__session_status")=='stop':
frappe.msgprint(frappe.db.get_global("__session_status_message"))
raise frappe.SessionStopped('Session Stopped')
def log(msg):
print msg
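# Editorial note, not part of the original module: entries in an app's
# patches.txt (read by get_all_patches above) are handled by execute_patch()
# in one of two forms; both examples below are illustrative only.
#
#   myapp.patches.v1_0.rename_some_field
#       -> the module is imported and its execute() function is called
#   execute:frappe.db.sql("select 1")
#       -> the statement after "execute:" is run directly via exec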
# ==== TheHonestGene/thehonestgene-pipeline :: thehonestgenepipeline/imputation.py | copies: 1 | size: 3612 | license: mit ====
"""
Pipeline for imputation
"""
from os import path
from celery.utils.log import get_task_logger
from celery.signals import after_setup_task_logger
from thehonestgenepipeline.celery import celery
from imputor.core import impute as imp
from imputor.core import genotype_parser
from . import GENOTYPE_FOLDER, DATA_FOLDER
from . import get_platform_from_genotype, save_analysis_data
from .progress_logger import CeleryProgressLogHandler
LOGGER = get_task_logger(imp.__name__)
@after_setup_task_logger.connect
def setup_task_logger(**kwargs):
"""
Log messages back to broker
"""
progress_handler = CeleryProgressLogHandler(celery, 'imputation')
LOGGER.addHandler(progress_handler)
@celery.task(serializer='json')
def convert(genotype_id, log_extra=None):
"""
Convert nucleotides to binary encoding
"""
if log_extra is None:
log_extra = {'progress':0, 'max_progress':100}
try:
log_extra['id'] = genotype_id
filename = '%s.hdf5' % genotype_id
LOGGER.info('Starting Conversion', extra=log_extra)
genotype_file = '%s/ORIGINAL/%s' % (GENOTYPE_FOLDER, filename)
output_file = '%s/CONVERTED/%s' % (GENOTYPE_FOLDER, filename)
if not path.exists(genotype_file):
raise Exception('Genotype file %s not found' % genotype_file)
# Need to pass in
platform = get_platform_from_genotype(genotype_file)
nt_map_file = '%s/NT_DATA/%s_nt_map.pickled' % (DATA_FOLDER, platform)
result = genotype_parser.convert_genotype_nt_key_encoding(genotype_file, output_file, nt_map_file, log_extra=log_extra)
save_analysis_data(output_file, result, 'convert')
LOGGER.info('Finished Conversion', extra={'progress':log_extra.get('max_progress', 100), 'id':genotype_id})
except Exception as err:
LOGGER.error('Error during conversion', extra=log_extra)
raise err
return result
@celery.task(serializer='json')
def impute(genotype_id, log_extra=None):
"""
Impute missing SNPs
"""
if log_extra is None:
log_extra = {'progress':0, 'max_progress':100}
try:
log_extra['id'] = genotype_id
filename = '%s.hdf5' % genotype_id
LOGGER.info('Starting Imputation', extra=log_extra)
genotype_file = '%s/CONVERTED/%s' % (GENOTYPE_FOLDER, filename)
output_file = '%s/IMPUTED/%s' % (GENOTYPE_FOLDER, filename)
if not path.exists(genotype_file):
raise Exception('Genotype file %s not found' % genotype_file)
# Need to pass in
platform = get_platform_from_genotype(genotype_file)
ld_folder = '%s/LD_DATA/%s' % (DATA_FOLDER, platform)
result = imp.impute(genotype_file, ld_folder, output_file, log_extra=log_extra)
save_analysis_data(output_file, result, 'imputation')
LOGGER.info('Finished Imputation', extra={'progress':log_extra.get('max_progress', 100), 'id':genotype_id})
except Exception as err:
LOGGER.error('Error during imputation', extra=log_extra)
raise err
return result
@celery.task(serializer='json')
def imputation(genotype_id):
"""
Entire imputation pipeline consisting of convert and impute
"""
result = {}
LOGGER.info('Starting Imputation Pipeline', extra={'progress':0, 'id':genotype_id})
result['convert'] = convert(genotype_id, {'progress':5, 'max_progress':20})
result['imputation'] = impute(genotype_id, {'progress':20, 'max_progress':95})
LOGGER.info('Finished Imputation Pipeline', extra={'progress':100, 'id':genotype_id, 'state':'FINISHED'})
return result
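# Editorial usage sketch, not part of the original module. The pipeline tasks are
# meant to be queued through Celery; the genotype id below is invented.
#
#   from thehonestgenepipeline.imputation import imputation
#   async_result = imputation.delay('example-genotype-id')
#   result = async_result.get()   # {'convert': ..., 'imputation': ...}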
# ==== emperorcezar/django-guardian :: guardian/testapp/tests/utils_test.py | copies: 35 | size: 4222 | license: bsd-2-clause ====
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Group, AnonymousUser
from django.db import models
from guardian.compat import get_user_model
from guardian.testapp.tests.conf import skipUnlessTestApp
from guardian.testapp.tests.core_test import ObjectPermissionTestCase
from guardian.testapp.models import Project
from guardian.testapp.models import ProjectUserObjectPermission
from guardian.testapp.models import ProjectGroupObjectPermission
from guardian.models import UserObjectPermission
from guardian.models import UserObjectPermissionBase
from guardian.models import GroupObjectPermission
from guardian.utils import get_anonymous_user
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
from guardian.utils import get_obj_perms_model
from guardian.exceptions import NotUserNorGroup
User = get_user_model()
class GetAnonymousUserTest(TestCase):
def test(self):
anon = get_anonymous_user()
self.assertTrue(isinstance(anon, User))
class GetIdentityTest(ObjectPermissionTestCase):
def test_user(self):
user, group = get_identity(self.user)
self.assertTrue(isinstance(user, User))
self.assertEqual(group, None)
def test_anonymous_user(self):
anon = AnonymousUser()
user, group = get_identity(anon)
self.assertTrue(isinstance(user, User))
self.assertEqual(group, None)
def test_group(self):
user, group = get_identity(self.group)
self.assertTrue(isinstance(group, Group))
self.assertEqual(user, None)
def test_not_user_nor_group(self):
self.assertRaises(NotUserNorGroup, get_identity, 1)
self.assertRaises(NotUserNorGroup, get_identity, "User")
self.assertRaises(NotUserNorGroup, get_identity, User)
@skipUnlessTestApp
class GetUserObjPermsModelTest(TestCase):
def test_for_instance(self):
project = Project(name='Foobar')
self.assertEqual(get_user_obj_perms_model(project),
ProjectUserObjectPermission)
def test_for_class(self):
self.assertEqual(get_user_obj_perms_model(Project),
ProjectUserObjectPermission)
def test_default(self):
self.assertEqual(get_user_obj_perms_model(ContentType),
UserObjectPermission)
def test_user_model(self):
# this test assumes that there were no direct obj perms model to User
# model defined (i.e. while testing guardian app in some custom project)
self.assertEqual(get_user_obj_perms_model(User),
UserObjectPermission)
@skipUnlessTestApp
class GetGroupObjPermsModelTest(TestCase):
def test_for_instance(self):
project = Project(name='Foobar')
self.assertEqual(get_group_obj_perms_model(project),
ProjectGroupObjectPermission)
def test_for_class(self):
self.assertEqual(get_group_obj_perms_model(Project),
ProjectGroupObjectPermission)
def test_default(self):
self.assertEqual(get_group_obj_perms_model(ContentType),
GroupObjectPermission)
def test_group_model(self):
# this test assumes that there were no direct obj perms model to Group
# model defined (i.e. while testing guardian app in some custom project)
self.assertEqual(get_group_obj_perms_model(Group),
GroupObjectPermission)
class GetObjPermsModelTest(TestCase):
def test_image_field(self):
class SomeModel(models.Model):
image = models.FileField(upload_to='images/')
obj = SomeModel()
perm_model = get_obj_perms_model(obj, UserObjectPermissionBase,
UserObjectPermission)
self.assertEqual(perm_model, UserObjectPermission)
def test_file_field(self):
class SomeModel2(models.Model):
file = models.FileField(upload_to='images/')
obj = SomeModel2()
perm_model = get_obj_perms_model(obj, UserObjectPermissionBase,
UserObjectPermission)
self.assertEqual(perm_model, UserObjectPermission)
# ==== pseudocubic/neutronpy :: neutronpy/instrument/detector.py | copies: 3 | size: 2517 | license: mit ====
# -*- coding: utf-8 -*-
r"""Defines Detector class for use in TimeOfFlightInstrument
"""
class Detector(object):
r"""Class defining a neutron detector for Time of Flight spectrometer
resolution calculations.
Parameters
----------
shape : str
Shape of the detector. 'cylindrical' or 'spherical'
width : list
Horizontal coverage of the detector from sample, where 0 is the
angle of ki where it would hit the detector, in degrees.
height : list
Vertical coverage of the detector from the sample, where 0 is the
position of ki where it would hit the detector, in degrees.
radius : float
Radius of curvature of the detector, i.e. the distance
from the sample to the detector, in cm.
hpixels : int
Angular acceptance of a single detector in the horizontal orientation,
in arc minutes.
vpixels : int
Number of detector pixels in the vertical direction
tau : float, optional
Binning of the detector in microseconds.
thickness : float, optional
Thickness of the detector in cm.
orientation : string
Orientation of the cylinder, 'horizontal' or 'vertical', where the
radius of curvature rotates around the horizontal or vertical axis,
respectively. Required for shape == 'cylindrical'.
dead_angles : array-like, optional
List where dead angles are entered such that [start, stop], in
degrees. If more than one range of dead angles, pass list of lists.
Attributes
----------
shape
width
height
radius
tau
thickness
orientation
dead_angles
"""
def __init__(self, shape, width, height, radius, hpixels, vpixels, tau=0.1, thickness=1, orientation=None, dead_angles=None):
self.shape = shape
self.width = width
self.height = height
self.radius = radius
self.tau = tau
self.thickness = thickness
self.hpixels = hpixels
self.vpixels = vpixels
if dead_angles:
self.dead_angles = dead_angles
if orientation:
self.orientation = orientation
def __repr__(self):
args = ', '.join([str(getattr(self, key)) for key in ['shape', 'width', 'height', 'radius']])
kwargs = ', '.join(
['{0}={1}'.format(key, getattr(self, key, None)) for key in ['resolution', 'orientation', 'dead_angles']])
return "Detector({0})".format(', '.join([args, kwargs])) | mit | -5,932,480,844,017,357,000 | 30.475 | 129 | 0.627731 | false |
# ==== jvdm/AutobahnPython :: examples/twisted/wamp/pubsub/basic/frontend.py | copies: 8 | size: 2742 | license: mit ====
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that subscribes and receives events, and
stop after having received 5 events.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
self.received = 0
sub = yield self.subscribe(self.on_event, u'com.myapp.topic1')
print("Subscribed to com.myapp.topic1 with {}".format(sub.id))
def on_event(self, i):
print("Got event: {}".format(i))
self.received += 1
# self.config.extra for configuration, etc. (see [A])
if self.received > self.config.extra['max_events']:
print("Received enough events; disconnecting.")
self.leave()
def onDisconnect(self):
print("disconnected")
if reactor.running:
reactor.stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
extra=dict(
max_events=5, # [A] pass in additional configuration
),
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
# ==== ngugi/geonode :: pavement.py | copies: 1 | size: 19902 | license: gpl-3.0 ====
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import re
import shutil
import sys
import time
import urllib
import urllib2
import zipfile
import glob
import fileinput
from setuptools.command import easy_install
from paver.easy import task, options, cmdopts, needs
from paver.easy import path, sh, info, call_task
from paver.easy import BuildFailure
try:
from geonode.settings import GEONODE_APPS
except:
#probably trying to run install_win_deps.
pass
try:
from paver.path import pushd
except ImportError:
from paver.easy import pushd
assert sys.version_info >= (2, 6), \
SystemError("GeoNode Build requires python 2.6 or better")
def grab(src, dest, name):
download = True
if not dest.exists():
print 'Downloading %s' % name
elif not zipfile.is_zipfile(dest):
print 'Downloading %s (corrupt file)' % name
else:
download = False
if download:
urllib.urlretrieve(str(src), str(dest))
GEOSERVER_URL = "http://build.geonode.org/geoserver/latest/geoserver.war"
DATA_DIR_URL = "http://build.geonode.org/geoserver/latest/data.zip"
JETTY_RUNNER_URL = "http://repo2.maven.org/maven2/org/mortbay/jetty/jetty-runner/8.1.8.v20121106/jetty-runner-8.1.8.v20121106.jar"
@task
def setup_geoserver(options):
"""Prepare a testing instance of GeoServer."""
download_dir = path('downloaded')
if not download_dir.exists():
download_dir.makedirs()
geoserver_dir = path('geoserver')
geoserver_bin = download_dir / os.path.basename(GEOSERVER_URL)
jetty_runner = download_dir / os.path.basename(JETTY_RUNNER_URL)
grab(GEOSERVER_URL, geoserver_bin, "geoserver binary")
grab(JETTY_RUNNER_URL, jetty_runner, "jetty runner")
if not geoserver_dir.exists():
geoserver_dir.makedirs()
webapp_dir = geoserver_dir / 'geoserver'
if not webapp_dir:
webapp_dir.makedirs()
print 'extracting geoserver'
z = zipfile.ZipFile(geoserver_bin, "r")
z.extractall(webapp_dir)
_install_data_dir()
def _install_data_dir():
target_data_dir = path('geoserver/data')
if target_data_dir.exists():
target_data_dir.rmtree()
original_data_dir = path('geoserver/geoserver/data')
justcopy(original_data_dir, target_data_dir)
config = path('geoserver/data/security/auth/geonodeAuthProvider/config.xml')
with open(config) as f:
xml = f.read()
m = re.search('baseUrl>([^<]+)', xml)
xml = xml[:m.start(1)] + "http://localhost:8000/" + xml[m.end(1):]
with open(config, 'w') as f:
f.write(xml)
@task
def static(options):
with pushd('geonode/static'):
sh('make')
@task
@needs([
'setup_geoserver',
])
def setup(options):
"""Get dependencies and prepare a GeoNode development environment."""
info(('GeoNode development environment successfully set up.'
'If you have not set up an administrative account,'
' please do so now. Use "paver start" to start up the server.'))
def grab_winfiles(url, dest, packagename):
#~gohlke needs a user agent that is not python
# Add your headers
headers = {'User-Agent': 'Mozilla 5.10'}
request = urllib2.Request(url, None, headers)
response = urllib2.urlopen(request)
with open(dest, 'wb') as writefile:
writefile.write(response.read())
@task
def win_install_deps(options):
"""
Install all Windows Binary automatically
"""
download_dir = path('downloaded').abspath()
if not download_dir.exists():
download_dir.makedirs()
win_packages = {
"PIL": "https://pypi.python.org/packages/2.7/P/Pillow/Pillow-2.5.1.win32-py2.7.exe",
"Py2exe": "http://superb-dca2.dl.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.7.exe",
"Nose": "https://s3.amazonaws.com/geonodedeps/nose-1.3.3.win32-py2.7.exe",
"LXML": "https://pypi.python.org/packages/2.7/l/lxml/lxml-3.3.5.win32-py2.7.exe",
"GDAL": "https://s3.amazonaws.com/geonodedeps/GDAL-1.11.0.win32-py2.7.exe",
"PyProj": "https://pyproj.googlecode.com/files/pyproj-1.9.3.win32-py2.7.exe",
"Shapely": "https://pypi.python.org/packages/2.7/S/Shapely/Shapely-1.3.0.win32-py2.7.exe",
"Psycopg2": "http://www.stickpeople.com/projects/python/win-psycopg/psycopg2-2.4.5.win32-py2.7-pg9.1.3-release.exe"
}
for package, url in win_packages.iteritems():
tempfile = download_dir / os.path.basename(url)
grab_winfiles(url, tempfile, package)
try:
easy_install.main([tempfile])
except:
print "install failed"
os.remove(tempfile)
print "Windows dependencies now complete. Run pip install -e geonode --use-mirrors"
@cmdopts([
('version=', 'v', 'Legacy GeoNode version of the existing database.')
])
@task
def upgradedb(options):
"""
Add 'fake' data migrations for existing tables from legacy GeoNode versions
"""
version = options.get('version')
if version in ['1.1', '1.2']:
sh("python manage.py migrate maps 0001 --fake")
sh("python manage.py migrate avatar 0001 --fake")
elif version is None:
print "Please specify your GeoNode version"
else:
print "Upgrades from version %s are not yet supported." % version
@task
def sync(options):
"""
Run the syncdb and migrate management commands to create and migrate a DB
"""
sh("python manage.py syncdb --noinput")
#sh("python manage.py migrate --noinput")
sh("python manage.py loaddata sample_admin.json")
@task
def package(options):
"""
Creates a tarball to use for building the system elsewhere
"""
import pkg_resources
import tarfile
import geonode
version = geonode.get_version()
# Use GeoNode's version for the package name.
pkgname = 'GeoNode-%s-all' % version
# Create the output directory.
out_pkg = path(pkgname)
out_pkg_tar = path("%s.tar.gz" % pkgname)
# Create a distribution in zip format for the geonode python package.
dist_dir = path('dist')
dist_dir.rmtree()
sh('python setup.py sdist --formats=zip')
with pushd('package'):
#Delete old tar files in that directory
for f in glob.glob('GeoNode*.tar.gz'):
old_package = path(f)
if old_package != out_pkg_tar:
old_package.remove()
if out_pkg_tar.exists():
info('There is already a package for version %s' % version)
return
    # Clean anything that is in the output package tree.
out_pkg.rmtree()
out_pkg.makedirs()
support_folder = path('support')
install_file = path('install.sh')
# And copy the default files from the package folder.
justcopy(support_folder, out_pkg / 'support')
justcopy(install_file, out_pkg)
geonode_dist = path('..') / 'dist' / 'GeoNode-%s.zip' % version
justcopy(geonode_dist, out_pkg)
# Create a tar file with all files in the output package folder.
tar = tarfile.open(out_pkg_tar, "w:gz")
for file in out_pkg.walkfiles():
tar.add(file)
# Add the README with the license and important links to documentation.
tar.add('README', arcname=('%s/README.rst' % out_pkg))
tar.close()
# Remove all the files in the temporary output package directory.
out_pkg.rmtree()
# Report the info about the new package.
info("%s created" % out_pkg_tar.abspath())
@task
@needs(['start_geoserver',
'sync',
'start_django'])
@cmdopts([
('bind=', 'b', 'Bind server to provided IP address and port number.'),
('java_path=', 'j', 'Full path to java install for Windows'),
('foreground', 'f', 'Do not run in background but in foreground')
], share_with=['start_django', 'start_geoserver'])
def start():
"""
Start GeoNode (Django, GeoServer & Client)
"""
info("GeoNode is now available.")
@task
def stop_django():
"""
Stop the GeoNode Django application
"""
kill('python', 'runserver')
@task
def stop_geoserver():
"""
Stop GeoServer
"""
kill('java', 'geoserver')
@task
def stop():
"""
Stop GeoNode
"""
#windows needs to stop the geoserver first b/c we can't tell which python is running, so we kill everything
stop_geoserver()
info("Stopping GeoNode ...")
stop_django()
@cmdopts([
('bind=', 'b', 'Bind server to provided IP address and port number.')
])
@task
def start_django():
"""
Start the GeoNode Django application
"""
bind = options.get('bind', '')
foreground = '' if options.get('foreground', False) else '&'
sh('python manage.py runserver %s %s' % (bind, foreground))
@cmdopts([
('java_path=', 'j', 'Full path to java install for Windows')
])
@task
def start_geoserver(options):
"""
Start GeoServer with GeoNode extensions
"""
from geonode.settings import OGC_SERVER
GEOSERVER_BASE_URL = OGC_SERVER['default']['LOCATION']
url = "http://localhost:8080/geoserver/"
if GEOSERVER_BASE_URL != url:
print 'your GEOSERVER_BASE_URL does not match %s' % url
sys.exit(1)
download_dir = path('downloaded').abspath()
jetty_runner = download_dir / os.path.basename(JETTY_RUNNER_URL)
data_dir = path('geoserver/data').abspath()
web_app = path('geoserver/geoserver').abspath()
log_file = path('geoserver/jetty.log').abspath()
config = path('scripts/misc/jetty-runner.xml').abspath()
# @todo - we should not have set workdir to the datadir but a bug in geoserver
# prevents geonode security from initializing correctly otherwise
with pushd(data_dir):
javapath = "java"
loggernullpath = os.devnull
# checking if our loggernullpath exists and if not, reset it to something manageable
if loggernullpath == "nul":
open("../../downloaded/null.txt", 'w+').close()
loggernullpath = "../../downloaded/null.txt"
try:
sh(('java -version'))
except:
if not options.get('java_path', None):
print "Paver cannot find java in the Windows Environment. Please provide the --java_path flag with your full path to java.exe e.g. --java_path=C:/path/to/java/bin/java.exe"
sys.exit(1)
# if there are spaces
javapath = 'START /B "" "' + options['java_path'] + '"'
sh((
'%(javapath)s -Xmx512m -XX:MaxPermSize=256m'
' -DGEOSERVER_DATA_DIR=%(data_dir)s'
# workaround for JAI sealed jar issue and jetty classloader
' -Dorg.eclipse.jetty.server.webapp.parentLoaderPriority=true'
' -jar %(jetty_runner)s'
' --log %(log_file)s'
' %(config)s'
' > %(loggernullpath)s &' % locals()
))
info('Starting GeoServer on %s' % url)
# wait for GeoServer to start
started = waitfor(url)
info('The logs are available at %s' % log_file)
if not started:
# If applications did not start in time we will give the user a chance
# to inspect them and stop them manually.
info(('GeoServer never started properly or timed out.'
'It may still be running in the background.'))
sys.exit(1)
@task
def test(options):
"""
Run GeoNode's Unit Test Suite
"""
sh("python manage.py test %s.tests --noinput" % '.tests '.join(GEONODE_APPS))
@task
def test_javascript(options):
with pushd('geonode/static/geonode'):
sh('./run-tests.sh')
@task
@cmdopts([
('name=', 'n', 'Run specific tests.')
])
def test_integration(options):
"""
Run GeoNode's Integration test suite against the external apps
"""
_reset()
# Start GeoServer
call_task('start_geoserver')
info("GeoNode is now available, running the tests now.")
name = options.get('name', 'geonode.tests.integration')
success = False
try:
if name == 'geonode.tests.csw':
call_task('start')
sh('sleep 30')
call_task('setup_data')
sh(('python manage.py test %s'
' --noinput --liveserver=localhost:8000' % name))
except BuildFailure, e:
info('Tests failed! %s' % str(e))
else:
success = True
finally:
# don't use call task here - it won't run since it already has
stop()
_reset()
if not success:
sys.exit(1)
@task
def run_tests():
"""
Executes the entire test suite.
"""
sh('python manage.py test geonode.tests.smoke')
call_task('test')
call_task('test_integration')
call_task('test_integration', options={'name': 'geonode.tests.csw'})
sh('flake8 geonode')
@task
@needs(['stop'])
def reset():
"""
Reset a development environment (Database, GeoServer & Catalogue)
"""
_reset()
def _reset():
sh("rm -rf geonode/development.db")
sh("rm -rf geonode/uploaded/*")
_install_data_dir()
@needs(['reset'])
def reset_hard():
"""
Reset a development environment (Database, GeoServer & Catalogue)
"""
sh("git clean -dxf")
@task
@cmdopts([
('type=', 't', 'Import specific data type ("vector", "raster", "time")'),
])
def setup_data():
"""
Import sample data (from gisdata package) into GeoNode
"""
import gisdata
ctype = options.get('type', None)
data_dir = gisdata.GOOD_DATA
if ctype in ['vector', 'raster', 'time']:
data_dir = os.path.join(gisdata.GOOD_DATA, ctype)
sh("python manage.py importlayers %s -v2" % data_dir)
@needs(['package'])
@cmdopts([
('key=', 'k', 'The GPG key to sign the package'),
('ppa=', 'p', 'PPA this package should be published to.'),
])
def deb(options):
"""
Creates debian packages.
Example uses:
paver deb
paver deb -k 12345
paver deb -k 12345 -p geonode/testing
"""
key = options.get('key', None)
ppa = options.get('ppa', None)
version, simple_version = versions()
info('Creating package for GeoNode version %s' % version)
# Get rid of any uncommitted changes to debian/changelog
info('Getting rid of any uncommitted changes in debian/changelog')
sh('git checkout package/debian/changelog')
# Workaround for git-dch bug
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594580
sh('ln -s %s %s' % (os.path.realpath('.git'), os.path.realpath('package')))
with pushd('package'):
# Install requirements
#sh('sudo apt-get -y install debhelper devscripts git-buildpackage')
sh(('git-dch --spawn-editor=snapshot --git-author --new-version=%s'
' --id-length=6 --ignore-branch --release' % (simple_version)))
deb_changelog = path('debian') / 'changelog'
for line in fileinput.input([deb_changelog], inplace=True):
print line.replace("urgency=medium", "urgency=high"),
## Revert workaround for git-dhc bug
sh('rm -rf .git')
if key is None and ppa is None:
# A local installable package
sh('debuild -uc -us -A')
elif key is None and ppa is not None:
# A sources package, signed by daemon
sh('debuild -S')
elif key is not None and ppa is None:
# A signed installable package
sh('debuild -k%s -A' % key)
elif key is not None and ppa is not None:
# A signed, source package
sh('debuild -k%s -S' % key)
if ppa is not None:
sh('dput ppa:%s geonode_%s_source.changes' % (ppa, simple_version))
@task
def publish():
if 'GPG_KEY_GEONODE' in os.environ:
key = os.environ['GPG_KEY_GEONODE']
else:
print "You need to set the GPG_KEY_GEONODE environment variable"
return
call_task('deb', options={
'key': key,
'ppa': 'geonode/testing',
})
version, simple_version = versions()
sh('git add package/debian/changelog')
sh('git commit -m "Updated changelog for version %s"' % version)
sh('git tag %s' % version)
sh('git push origin %s' % version)
sh('git tag debian/%s' % simple_version)
sh('git push origin debian/%s' % simple_version)
sh('git push origin master')
sh('python setup.py sdist upload')
def versions():
import geonode
from geonode.version import get_git_changeset
raw_version = geonode.__version__
version = geonode.get_version()
timestamp = get_git_changeset()
major, minor, revision, stage, edition = raw_version
branch = 'dev'
if stage == 'final':
stage = 'thefinal'
if stage == 'alpha' and edition == 0:
tail = '%s%s' % (branch, timestamp)
else:
tail = '%s%s' % (stage, edition)
simple_version = '%s.%s.%s+%s' % (major, minor, revision, tail)
return version, simple_version
def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
while running and time.time() - t0 < time_out:
if os.name == 'nt':
p = Popen('tasklist | find "%s"' % arg1, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False)
else:
p = Popen('ps aux | grep %s' % arg1, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
#this kills all java.exe and python including self in windows
if ('%s' % arg2 in line) or (os.name == 'nt' and '%s' % arg1 in line):
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
if os.name == 'nt':
kill = 'taskkill /F /PID "%s"' % fields[1]
else:
kill = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill)
# Give it a little more time
time.sleep(1)
else:
pass
if running:
raise Exception('Could not stop %s: '
'Running processes are\n%s'
% (arg1, '\n'.join([l.strip() for l in lines])))
def waitfor(url, timeout=300):
started = False
for a in xrange(timeout):
try:
resp = urllib.urlopen(url)
except IOError:
pass
else:
if resp.getcode() == 200:
started = True
break
time.sleep(1)
return started
def justcopy(origin, target):
if os.path.isdir(origin):
shutil.rmtree(target, ignore_errors=True)
shutil.copytree(origin, target)
elif os.path.isfile(origin):
if not os.path.exists(target):
os.makedirs(target)
shutil.copy(origin, target)
# ==== ggaughan/pipe2py :: tests/pypipelines/pipe_zKJifuNS3BGLRQK_GsevXg.py | copies: 6 | size: 1875 | license: gpl-2.0 ====
# Pipe pipe_zKJifuNS3BGLRQK_GsevXg generated by pipe2py
from pipe2py import Context
from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipefetch import pipe_fetch
from pipe2py.modules.pipesplit import pipe_split
from pipe2py.modules.pipecount import pipe_count
from pipe2py.modules.pipesimplemath import pipe_simplemath
from pipe2py.modules.pipesimplemath import pipe_simplemath
from pipe2py.modules.pipetruncate import pipe_truncate
from pipe2py.modules.pipeoutput import pipe_output
def pipe_zKJifuNS3BGLRQK_GsevXg(context=None, _INPUT=None, conf=None, **kwargs):
# todo: insert pipeline description here
conf = conf or {}
if context and context.describe_input:
return []
if context and context.describe_dependencies:
return [u'pipecount', u'pipefetch', u'pipeoutput', u'pipesimplemath', u'pipesplit', u'pipetruncate']
forever = pipe_forever()
sw_224 = pipe_fetch(
context, forever, conf={'URL': {'type': 'url', 'value': 'file://data/www.sciencedaily.com_rss_computers_math.html'}})
sw_250 = pipe_split(
context, sw_224, splits=2, conf=None)
sw_243 = pipe_count(
context, sw_250, conf=None)
sw_94 = pipe_simplemath(
context, sw_243, conf={'OTHER': {'type': 'number', 'value': '5'}, 'OP': {'type': 'text', 'value': 'modulo'}})
sw_169 = pipe_simplemath(
context, sw_243, OTHER=sw_94, conf={'OTHER': {'terminal': 'OTHER', 'type': 'number'}, 'OP': {'type': 'text', 'value': 'subtract'}})
sw_232 = pipe_truncate(
context, sw_250, count=sw_169, conf={'count': {'terminal': 'count', 'type': 'number'}})
_OUTPUT = pipe_output(
context, sw_232, conf=None)
return _OUTPUT
if __name__ == "__main__":
pipeline = pipe_zKJifuNS3BGLRQK_GsevXg(Context())
for i in pipeline:
        print i
# ==== Mitali-Sodhi/CodeLingo :: Dataset/python/mvc.py | copies: 1 | size: 1601 | license: mit ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Model(object):
products = {
'milk': {'price': 1.50, 'quantity': 10},
'eggs': {'price': 0.20, 'quantity': 100},
'cheese': {'price': 2.00, 'quantity': 10}
}
class View(object):
def product_list(self, product_list):
print('PRODUCT LIST:')
for product in product_list:
print(product)
print('')
def product_information(self, product, product_info):
print('PRODUCT INFORMATION:')
print('Name: %s, Price: %.2f, Quantity: %d\n' %
(product.title(), product_info.get('price', 0),
product_info.get('quantity', 0)))
def product_not_found(self, product):
print('That product "%s" does not exist in the records' % product)
class Controller(object):
def __init__(self):
self.model = Model()
self.view = View()
def get_product_list(self):
product_list = self.model.products.keys()
self.view.product_list(product_list)
def get_product_information(self, product):
product_info = self.model.products.get(product, None)
if product_info is not None:
self.view.product_information(product, product_info)
else:
self.view.product_not_found(product)
if __name__ == '__main__':
controller = Controller()
controller.get_product_list()
controller.get_product_information('cheese')
controller.get_product_information('eggs')
controller.get_product_information('milk')
controller.get_product_information('arepas')
# ==== openlabs/raven :: tests/utils/stacks/tests.py | copies: 3 | size: 1602 | license: bsd-3-clause ====
# -*- coding: utf-8 -*-
from mock import Mock
from unittest2 import TestCase
from raven.utils.stacks import get_culprit, get_stack_info
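# Minimal mapping-like helper used below as a stand-in for a frame's f_locals.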
class Context(object):
def __init__(self, dict):
self.dict = dict
__getitem__ = lambda s, *a: s.dict.__getitem__(*a)
__setitem__ = lambda s, *a: s.dict.__setitem__(*a)
iterkeys = lambda s, *a: s.dict.iterkeys(*a)
class StackTest(TestCase):
def test_get_culprit_bad_module(self):
culprit = get_culprit([{
'module': None,
'function': 'foo',
}])
self.assertEquals(culprit, '<unknown>.foo')
culprit = get_culprit([{
'module': 'foo',
'function': None,
}])
self.assertEquals(culprit, 'foo.<unknown>')
culprit = get_culprit([{
}])
self.assertEquals(culprit, '<unknown>.<unknown>')
def test_bad_locals_in_frame(self):
frame = Mock()
frame.f_locals = Context({
'foo': 'bar',
'biz': 'baz',
})
frame.f_lineno = 1
frame.f_globals = {}
frame.f_code.co_filename = __file__
frame.f_code.co_name = __name__
frames = [(frame, 1)]
results = get_stack_info(frames)
self.assertEquals(len(results), 1)
result = results[0]
self.assertTrue('vars' in result)
vars = result['vars']
self.assertTrue(isinstance(vars, dict))
self.assertTrue('foo' in vars)
self.assertEquals(vars['foo'], 'bar')
self.assertTrue('biz' in vars)
self.assertEquals(vars['biz'], 'baz')
# ==== jastarex/DeepLearningCourseCodes :: 06_Object_detection/Object_Detection_Tensorflow_API_demo/object_detection/models/ssd_mobilenet_v1_feature_extractor.py | copies: 21 | size: 3813 | license: apache-2.0 ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
depth_multiplier,
min_depth,
conv_hyperparams,
reuse_weights=None):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
}
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
# ==== franek/weboob :: docs/source/conf.py | copies: 1 | size: 6390 | license: agpl-3.0 ====
# -*- coding: utf-8 -*-
#
# Weboob documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 25 11:56:52 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
os.system('./genapi.py')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Weboob'
copyright = u'2010-2012, Weboob Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.f'
# The full version, including alpha/beta/rc tags.
release = '0.f'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Weboobdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Weboob.tex', u'Weboob Documentation',
u'Weboob Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| agpl-3.0 | 8,919,447,341,891,020,000 | 31.602041 | 99 | 0.711737 | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/284_MuertosFeather/__init__.py | 1 | 2362 | # Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
#Complete - 100%.
qn = "284_MuertosFeather"
#NPC'S
TREVOR = 32166
#ITEM'S
FEATHER = 9748
#MOB'S
MOBS = range(22239,22241)+range(22242,22244)+range(22245,22247)
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
feather = st.getQuestItemsCount(FEATHER)
if event == "32166-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "32166-06.htm" :
st.giveItems(57,feather*45)
st.takeItems(FEATHER,-1)
elif event == "32166-08.htm" :
st.takeItems(FEATHER,-1)
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player):
npcId = npc.getNpcId()
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
id = st.getState()
cond = st.getInt("cond")
feather = st.getQuestItemsCount(FEATHER)
if id == CREATED and npcId == TREVOR :
if player.getLevel() < 11 :
htmltext = "32166-02.htm"
st.exitQuest(1)
else :
htmltext = "32166-01.htm"
elif id == STARTED and npcId == TREVOR :
if not feather :
htmltext = "32166-04.htm"
else :
htmltext = "32166-05.htm"
return htmltext
def onKill(self,npc,player,isPet) :
st = player.getQuestState(qn)
if not st: return
if st.getState() == STARTED :
npcId = npc.getNpcId()
chance = st.getRandom(100)
if (npcId in MOBS) and (chance < 70) : #Retail statistic info. 20 mob's - 14 feathers
st.giveItems(FEATHER,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(284, qn, "Muertos Feather")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(TREVOR)
QUEST.addTalkId(TREVOR)
for mob in MOBS :
QUEST.addKillId(mob)
| gpl-3.0 | 431,278,598,474,281,200 | 27.457831 | 154 | 0.639712 | false |
GodelBose/GTAV_self_driving_network | data_generator.py | 1 | 3176 | import numpy as np
import cv2
import matplotlib.pyplot as plt
class Data_generator:
def __init__(self, files_per_batch, total_files, files_directory, cam_view=True, map_view=True, speed_view=True, view_resize=None, return_axis=True, return_buttons=True, axis_indices =[], button_indices=[], seq_len=1, use_sampling=False):
self.files_per_batch = files_per_batch
self.total_files = total_files
self.files_directory = files_directory
self.cam_view = cam_view
self.map_view = map_view
self.speed_view = speed_view
self.view_resize = view_resize
self.return_axis = return_axis
self.return_buttons = return_buttons
self.axis_indices = axis_indices
self.button_indices = button_indices
self.seq_len = seq_len
self.use_sampling = use_sampling
def yield_data_deepgta(self):
indices = np.random.randint(0,self.total_files,self.files_per_batch)
X = []
Y = []
for i in indices:
file_name = self.files_directory+'/'+'training_data-{}.npy'.format(i) if self.files_directory else 'training_data-{}.npy'.format(i)
train_data = np.load(file_name)
for j in range(train_data[0]):
temp_inputs = []
temp_labels = []
frame = train_data[3][j]
temp_inputs.append(frame)
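                # NOTE: as written this generator stops here -- it builds temp_inputs for
                # each frame but never fills X/Y, yields or returns anything. A completed
                # version would presumably mirror yield_data() below (hypothetical sketch,
                # label layout assumed):
                #     temp_labels.append(train_data[1][j])
                #     X.append(temp_inputs); Y.append(temp_labels)
                # followed by a return of the collected arrays.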
def yield_data(self):
indices = np.random.randint(0,self.total_files,self.files_per_batch)
X = []
Y = []
num_inputs = self.map_view + self.cam_view + self.speed_view
num_outputs = self.return_axis + self.return_buttons
random_radius = np.random.uniform(0.04,0.125)
normalizer = np.pi * random_radius**2
for i in indices:
file_name = self.files_directory+'/'+'training_data-{}.npy'.format(i) if self.files_directory else 'training_data-{}.npy'.format(i)
train_data = np.load(file_name)
for j, x in enumerate(train_data):
temp_inputs = []
temp_labels = []
# randomly discard samples with no steering to remove bias towards not steering
if self.use_sampling:
x_i,y_i = x[3][0], x[3][1]
radius = np.sqrt(x_i**2 + y_i**2)
volume = np.pi * radius**2
ratio = volume / normalizer
rand_f = np.random.uniform(0,1)
if rand_f > ratio:
continue
if self.map_view:
temp_inputs.append(x[1])
if self.cam_view:
# only consider 1 frame at a time
if self.seq_len == 1:
if self.view_resize:
screen = cv2.resize(x[0], self.view_resize)
temp_inputs.append(screen)
else:
temp_inputs.append(x[0])
else:
# consider multiframe scenario
if j >= self.seq_len:
views = [xx[0] for xx in train_data[j-self.seq_len:j]]
if self.view_resize:
views = [cv2.resize(xx, self.view_resize) for xx in views]
temp_inputs.append(np.array(views))
else:
continue
if self.speed_view:
temp_inputs.append(x[2][:,:,None])
if self.return_axis:
temp_labels.append([x[3][k] for k in self.axis_indices])
if self.return_buttons:
temp_labels.append([x[4][k] for k in self.button_indices])
X.append(temp_inputs)
Y.append(temp_labels)
#print(len(X[0]), len(X))
return [np.array([x[i] for x in X]) for i in range(num_inputs)], [np.array([y[i] for y in Y]) for i in range(num_outputs)]
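    # Rough numeric sketch of the sampling filter above (illustrative values only):
    # with random_radius = 0.1 the normalizer is pi * 0.1**2 ~= 0.0314. A sample whose
    # steering vector has magnitude 0.05 gives volume = pi * 0.05**2 ~= 0.0079, so
    # ratio ~= 0.25 and the frame is kept only about 25% of the time, while any sample
    # with magnitude >= random_radius always passes (ratio >= 1). This thins out
    # near-zero-steering frames and reduces the bias towards driving straight.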
| mit | -3,818,322,141,379,507,000 | 33.901099 | 239 | 0.650504 | false |
felipenaselva/felipe.repository | script.module.universalscrapers/lib/universalscrapers/scraperplugins/darewatch.py | 1 | 5676 | import re,requests,base64,time
import xbmcaddon
from ..scraper import Scraper
from ..common import clean_title,clean_search,random_agent,filter_host,send_log,error_log
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
class darewatch(Scraper):
domains = ['ondarewatch.com/']
name = "DareWatch"
sources = []
def __init__(self):
self.base_link = 'http://www.ondarewatch.com/'
self.search_url = self.base_link + '/index.php'
self.sources = []
if dev_log=='true':
self.start_time = time.time()
def scrape_movie(self, title, year, imdb, debrid=False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
#print 'darewatch ID> ' + search_id
headers = {'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding':'gzip, deflate, br','accept-language':'en-US,en;q=0.8','content-type':'application/x-www-form-urlencoded',
'User-Agent':random_agent(),'origin':self.base_link,'referer':self.base_link+'/search'}
data = {'menu': 'search','query': search_id}
html = requests.post(self.search_url,headers=headers,data=data,timeout=5).content
#print 'DAREWARCH > post: '+html
page = html.split('Movie results for:')[1]
Regex = re.compile('<h4>.+?class="link" href="(.+?)" title="(.+?)"',re.DOTALL).findall(page)
for item_url,name in Regex:
#print '(grabbed url) %s (title) %s' %(item_url,name)
if not clean_title(title).lower() == clean_title(name).lower():
continue
if not year in name:
continue
#print 'Darewatch URL check> ' + item_url
self.get_source(item_url,title,year,'','',start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
#print 'darewatch ID> ' + search_id
headers = {'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding':'gzip, deflate, br','accept-language':'en-US,en;q=0.8','content-type':'application/x-www-form-urlencoded',
'User-Agent':random_agent(),'origin':self.base_link,'referer':self.base_link+'/search'}
data = {'menu': 'search','query': search_id}
html = requests.post(self.search_url,headers=headers,data=data,timeout=5).content
#print 'DAREWARCH > post: '+html
page = html.split('TV show results for:')[1]
Regex = re.compile('<h4>.+?class="link" href="(.+?)" title="(.+?)"',re.DOTALL).findall(page)
for item_url,name in Regex:
#print '(grabbed url) %s (title) %s' %(item_url,name)
if not clean_title(title).lower() == clean_title(name).lower():
continue
if '/watchm/' not in item_url:
item_url = item_url + '/season/%s/episode/%s' %(season, episode)
#print 'Darewatch URL check> ' + item_url
self.get_source(item_url,title,year,season,episode,start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def get_source(self,item_url,title,year,season,episode,start_time):
try:
headers = {'User-Agent':random_agent()}
html = requests.get(item_url,headers=headers,timeout=10).content
match = re.compile("] = '(.+?)'",re.DOTALL).findall(html)
count = 0
for vid in match:
host = base64.b64decode(vid)
link=re.compile('.+?="(.+?)"',re.DOTALL).findall(host)[0]
if 'openload' in link:
try:
get_res=requests.get(link,headers=headers,timeout=5).content
rez = re.compile('description" content="(.+?)"',re.DOTALL).findall(get_res)[0]
if '1080' in rez:
qual = '1080p'
elif '720' in rez:
qual='720p'
else:
qual='SD'
except:qual='SD'
count +=1
self.sources.append({'source': 'Openload','quality': qual,'scraper': self.name,'url': link,'direct': False})
else:
hoster = link.split('//')[1].replace('www.','')
hoster = hoster.split('/')[0].lower()
if not filter_host(hoster):
continue
count +=1
self.sources.append({'source': hoster,'quality': 'DVD','scraper': self.name,'url': link,'direct': False})
if dev_log=='true':
end_time = time.time() - start_time
send_log(self.name,end_time,count,title,year, season=season,episode=episode)
except:
pass
| gpl-2.0 | -8,908,940,880,503,547,000 | 47.101695 | 147 | 0.511804 | false |
heiths/allura | Allura/allura/scripts/refresh_last_commits.py | 1 | 8903 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
from datetime import datetime
from contextlib import contextmanager
import faulthandler
from pylons import tmpl_context as c
from ming.orm import ThreadLocalORMSession, session
from allura import model as M
from allura.lib.utils import chunked_find
from allura.tasks.repo_tasks import refresh
from allura.scripts import ScriptTask
log = logging.getLogger(__name__)
class RefreshLastCommits(ScriptTask):
@classmethod
def parser(cls):
def _repo_type_list(s):
repo_types = []
for repo_type in s.split(','):
repo_type = repo_type.strip()
if repo_type not in ['git', 'hg']:
raise argparse.ArgumentTypeError(
'{0} is not a valid repo type.'.format(repo_type))
repo_types.append(repo_type)
return repo_types
parser = argparse.ArgumentParser(description='Using existing commit data, '
'refresh the last commit metadata in MongoDB. Run for all repos (no args), '
'or restrict by neighborhood, project, or code tool mount point.')
parser.add_argument('--nbhd', action='store', default='', dest='nbhd',
help='Restrict update to a particular neighborhood, e.g. /p/.')
parser.add_argument(
'--project', action='store', default='', dest='project',
help='Restrict update to a particular project. To specify a '
'subproject, use a slash: project/subproject.')
parser.add_argument('--project-regex', action='store', default='',
dest='project_regex',
help='Restrict update to projects for which the shortname matches '
'the provided regex.')
parser.add_argument(
'--repo-types', action='store', type=_repo_type_list,
default=['git', 'hg'], dest='repo_types',
help='Only refresh last commits for repos of the given type(s). Defaults to: '
'git,hg. Example: --repo-types=git')
parser.add_argument('--mount-point', default='', dest='mount_point',
help='Restrict update to repos at the given tool mount point. ')
parser.add_argument('--clean', action='store_true', dest='clean',
default=False, help='Remove last commit mongo docs for '
'project(s) being refreshed before doing the refresh.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run',
                            default=False, help='Log names of projects that would have their last commit data refreshed, without actually refreshing anything.')
parser.add_argument('--limit', action='store', type=int, dest='limit',
default=False, help='Limit of how many commits to process')
return parser
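        # Illustrative argument combinations (the exact wrapper used to invoke
        # ScriptTask subclasses depends on the Allura deployment):
        #     --nbhd /p/ --project myproj --mount-point code --clean
        #     --project-regex '^old-.*' --repo-types=git --dry-run
        #     --limit 500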
@classmethod
def execute(cls, options):
q_project = {}
if options.nbhd:
nbhd = M.Neighborhood.query.get(url_prefix=options.nbhd)
if not nbhd:
return "Invalid neighborhood url prefix."
q_project['neighborhood_id'] = nbhd._id
if options.project:
q_project['shortname'] = options.project
elif options.project_regex:
q_project['shortname'] = {'$regex': options.project_regex}
log.info('Refreshing last commit data')
for chunk in chunked_find(M.Project, q_project):
for p in chunk:
log.info("Refreshing last commit data for project '%s'." %
p.shortname)
if options.dry_run:
continue
c.project = p
if options.mount_point:
mount_points = [options.mount_point]
else:
mount_points = [ac.options.mount_point for ac in
M.AppConfig.query.find(dict(project_id=p._id))]
for app in (p.app_instance(mp) for mp in mount_points):
c.app = app
if not hasattr(app, 'repo'):
continue
if c.app.repo.tool.lower() not in options.repo_types:
log.info("Skipping %r: wrong type (%s)", c.app.repo,
c.app.repo.tool.lower())
continue
c.app.repo.status = 'analyzing'
session(c.app.repo).flush(c.app.repo)
try:
ci_ids = list(
reversed(list(c.app.repo.all_commit_ids())))
if options.clean:
cls._clean(ci_ids)
log.info('Refreshing all last commits in %r',
c.app.repo)
cls.refresh_repo_lcds(ci_ids, options)
new_commit_ids = app.repo.unknown_commit_ids()
if len(new_commit_ids) > 0:
refresh.post()
except:
log.exception('Error refreshing %r', c.app.repo)
raise
finally:
c.app.repo.status = 'ready'
session(c.app.repo).flush(c.app.repo)
ThreadLocalORMSession.flush_all()
@classmethod
def refresh_repo_lcds(cls, commit_ids, options):
model_cache = M.repository.ModelCache(
max_instances={M.repository.LastCommit: 4000},
max_queries={M.repository.LastCommit: 4000},
)
c.model_cache = model_cache
timings = []
print 'Processing last commits'
for i, commit_id in enumerate(commit_ids):
commit = M.repository.Commit.query.get(_id=commit_id)
if commit is None:
print "Commit missing, skipping: %s" % commit_id
continue
commit.set_context(c.app.repo)
with time(timings):
tree = commit.tree
cls._get_lcds(tree, model_cache)
ThreadLocalORMSession.flush_all()
if i % 100 == 0:
cls._print_stats(i, timings, 100)
if options.limit and i >= options.limit:
break
ThreadLocalORMSession.flush_all()
@classmethod
def _get_lcds(cls, tree, cache):
M.repository.LastCommit.get(tree)
"""
FIXME: if its needed to recurse into subdirectories, and compute their LCDs as well, something along these
lines should be enabled. This is not working as-is, and is not really necessary as this script doesn't
get used any more anyway.
for subtree in tree.tree_ids:
if subtree.name in tree.commit.changed_paths:
subtree_doc = cache.get(M.repository.Tree, dict(_id=subtree.id))
subtree_doc.set_context(tree)
cls._get_lcds(subtree_doc, cache)
"""
@classmethod
def _clean(cls, commit_ids):
# delete LastCommitDocs
i = M.repository.LastCommitDoc.m.find(
dict(commit_id={'$in': commit_ids})).count()
log.info("Deleting %i LastCommitDoc docs for %i commits...",
i, len(commit_ids))
M.repository.LastCommitDoc.m.remove(dict(commit_id={'$in': commit_ids}))
@classmethod
def _print_stats(cls, processed, timings, debug_step):
mt = max(timings)
tt = sum(timings)
at = tt / len(timings)
mat = sum(timings[-debug_step:]) / debug_step
print ' Processed %d commits (max: %f, avg: %f, mavg: %f, tot: %f)' % (
processed, mt, at, mat, tt)
@contextmanager
def time(timings):
s = datetime.utcnow()
yield
timings.append((datetime.utcnow() - s).total_seconds())
if __name__ == '__main__':
faulthandler.enable()
RefreshLastCommits.main()
| apache-2.0 | -5,485,001,930,387,302,000 | 42.857143 | 117 | 0.552398 | false |
inspirehep/invenio | modules/websubmit/lib/functions/Create_Modify_Interface.py | 12 | 15882 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is the Create_Modify_Interface function (along with its helpers).
It is used by WebSubmit for the "Modify Bibliographic Information" action.
"""
__revision__ = "$Id$"
import os
import re
import time
import pprint
import cgi
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.websubmit_functions.Retrieve_Data import Get_Field
from invenio.errorlib import register_exception
from invenio.htmlutils import escape_javascript_string
from invenio.messages import gettext_set_language, wash_language
def Create_Modify_Interface_getfieldval_fromfile(cur_dir, fld=""):
"""Read a field's value from its corresponding text file in 'cur_dir' (if it exists) into memory.
    Delete the text file after its value has been read in.
This function is called on the reload of the modify-record page. This way, the field in question
can be populated with the value last entered by the user (before reload), instead of always being
populated with the value still found in the DB.
"""
fld_val = ""
if len(fld) > 0 and os.access("%s/%s" % (cur_dir, fld), os.R_OK|os.W_OK):
fp = open( "%s/%s" % (cur_dir, fld), "r" )
fld_val = fp.read()
fp.close()
try:
os.unlink("%s/%s"%(cur_dir, fld))
except OSError:
# Cannot unlink file - ignore, let WebSubmit main handle this
pass
fld_val = fld_val.strip()
return fld_val
def Create_Modify_Interface_getfieldval_fromDBrec(fieldcode, recid):
"""Read a field's value from the record stored in the DB.
This function is called when the Create_Modify_Interface function is called for the first time
when modifying a given record, and field values must be retrieved from the database.
"""
fld_val = ""
if fieldcode != "":
for next_field_code in [x.strip() for x in fieldcode.split(",")]:
fld_val += "%s\n" % Get_Field(next_field_code, recid)
fld_val = fld_val.rstrip('\n')
return fld_val
def Create_Modify_Interface_transform_date(fld_val):
"""Accept a field's value as a string. If the value is a date in one of the following formats:
DD Mon YYYY (e.g. 23 Apr 2005)
YYYY-MM-DD (e.g. 2005-04-23)
...transform this date value into "DD/MM/YYYY" (e.g. 23/04/2005).
"""
if re.search("^[0-9]{2} [a-z]{3} [0-9]{4}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%d %b %Y"))
except (ValueError, TypeError):
# bad date format:
pass
elif re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", fld_val, re.IGNORECASE) is not None:
try:
fld_val = time.strftime("%d/%m/%Y", time.strptime(fld_val, "%Y-%m-%d"))
except (ValueError,TypeError):
# bad date format:
pass
return fld_val
def Create_Modify_Interface(parameters, curdir, form, user_info=None):
"""
Create an interface for the modification of a document, based on
the fields that the user has chosen to modify. This avoids having
    to redefine a submission page for the modifications, relying instead on
    the elements already defined for the initial submission, i.e. the SBI
action (The only page that needs to be built for the modification
is the page letting the user specify a document to modify).
This function should be added at step 1 of your modification
    workflow, after the functions that retrieve the report number and
    record id (Get_Report_Number, Get_Recid). Functions at step 2 are
    the ones executed upon successful submission of the form.
Create_Modify_Interface expects the following parameters:
* "fieldnameMBI" - the name of a text file in the submission
working directory that contains a list of the names of the
WebSubmit fields to include in the Modification interface.
      These field names are separated by "\n" or "+".
* "prefix" - some content displayed before the main
modification interface. Can contain HTML (i.e. needs to be
pre-escaped). The prefix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "suffix" - some content displayed after the main modification
interface. Can contain HTML (i.e. needs to be
pre-escaped). The suffix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "button_label" - the label for the "END" button.
* "button_prefix" - some content displayed before the button to
submit the form. Can contain HTML (i.e. needs to be
pre-escaped). The prefix can make use of Python string
replacement for common values (such as 'rn'). Percent signs
(%) must consequently be escaped (with %%).
* "dates_conversion" - by default, values interpreted as dates
are converted to their 'DD/MM/YYYY' format, whenever
possible. Set another value for a different behaviour
(eg. 'none' for no conversion)
Given the list of WebSubmit fields to be included in the
modification interface, the values for each field are retrieved
for the given record (by way of each WebSubmit field being
configured with a MARC Code in the WebSubmit database). An HTML
FORM is then created. This form allows a user to modify certain
field values for a record.
The file referenced by 'fieldnameMBI' is usually generated from a
    multiple-select form field: users can then select one or several
    fields to modify.
Note that the function will display WebSubmit Response elements,
but will not be able to set an initial value: this must be done by
    the Response element itself.
Additionally the function creates an internal field named
'Create_Modify_Interface_DONE' on the interface, that can be
retrieved in curdir after the form has been submitted.
This flag is an indicator for the function that displayed values
should not be retrieved from the database, but from the submitted
values (in case the page is reloaded). You can also rely on this
value when building your WebSubmit Response element in order to
    retrieve the value either from the record or from the submission
directory.
"""
ln = wash_language(form['ln'])
_ = gettext_set_language(ln)
global sysno,rn
t = ""
# variables declaration
fieldname = parameters['fieldnameMBI']
prefix = ''
suffix = ''
end_button_label = 'END'
end_button_prefix = ''
date_conversion_setting = ''
if parameters.has_key('prefix'):
prefix = parameters['prefix']
if parameters.has_key('suffix'):
suffix = parameters['suffix']
if parameters.has_key('button_label') and parameters['button_label']:
end_button_label = parameters['button_label']
if parameters.has_key('button_prefix'):
end_button_prefix = parameters['button_prefix']
if parameters.has_key('dates_conversion'):
date_conversion_setting = parameters['dates_conversion']
# Path of file containing fields to modify
the_globals = {
'doctype' : doctype,
'action' : action,
'act' : action, ## for backward compatibility
'step' : step,
'access' : access,
'ln' : ln,
'curdir' : curdir,
'uid' : user_info['uid'],
'uid_email' : user_info['email'],
'rn' : rn,
'last_step' : last_step,
'action_score' : action_score,
'__websubmit_in_jail__' : True,
'form': form,
'sysno': sysno,
'user_info' : user_info,
'__builtins__' : globals()['__builtins__'],
'Request_Print': Request_Print
}
if os.path.exists("%s/%s" % (curdir, fieldname)):
fp = open( "%s/%s" % (curdir, fieldname), "r" )
fieldstext = fp.read()
fp.close()
fieldstext = re.sub("\+","\n", fieldstext)
fields = fieldstext.split("\n")
else:
res = run_sql("SELECT fidesc FROM sbmFIELDDESC WHERE name=%s", (fieldname,))
if len(res) == 1:
fields = res[0][0].replace(" ", "")
fields = re.findall("<optionvalue=.*>", fields)
regexp = re.compile("""<optionvalue=(?P<quote>['|"]?)(?P<value>.*?)(?P=quote)""")
fields = [regexp.search(x) for x in fields]
fields = [x.group("value") for x in fields if x is not None]
fields = [x for x in fields if x not in ("Select", "select")]
else:
raise InvenioWebSubmitFunctionError("cannot find fields to modify")
#output some text
if not prefix:
t += "<center bgcolor=\"white\">The document <b>%s</b> has been found in the database.</center><br />Please modify the following fields:<br />Then press the '%s' button at the bottom of the page<br />\n" % \
(rn, cgi.escape(_(end_button_label)))
else:
t += prefix % the_globals
for field in fields:
subfield = ""
value = ""
marccode = ""
text = ""
# retrieve and display the modification text
t = t + "<FONT color=\"darkblue\">\n"
res = run_sql("SELECT modifytext FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res)>0:
t = t + "<small>%s</small> </FONT>\n" % res[0][0]
# retrieve the marc code associated with the field
res = run_sql("SELECT marccode FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
marccode = res[0][0]
# then retrieve the previous value of the field
if os.path.exists("%s/%s" % (curdir, "Create_Modify_Interface_DONE")):
# Page has been reloaded - get field value from text file on server, not from DB record
value = Create_Modify_Interface_getfieldval_fromfile(curdir, field)
else:
# First call to page - get field value from DB record
value = Create_Modify_Interface_getfieldval_fromDBrec(marccode, sysno)
if date_conversion_setting != 'none':
# If field is a date value, transform date into format DD/MM/YYYY:
value = Create_Modify_Interface_transform_date(value)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name=%s", (field,))
if len(res) > 0:
element_type = res[0][3]
numcols = res[0][6]
numrows = res[0][5]
size = res[0][4]
maxlength = res[0][7]
val = res[0][8]
fidesc = res[0][9]
if element_type == "T":
text = "<textarea name=\"%s\" rows=%s cols=%s wrap>%s</textarea>" % (field, numrows, numcols, cgi.escape(value))
elif element_type == "F":
text = "<input type=\"file\" name=\"%s\" size=%s maxlength=\"%s\">" % (field, size, maxlength)
elif element_type == "I":
text = "<input name=\"%s\" size=%s value=\"%s\"> " % (field, size, val and escape_javascript_string(val, escape_quote_for_html=True) or '')
text = text + '''<script type="text/javascript">/*<![CDATA[*/
document.forms[0].%s.value="%s";
/*]]>*/</script>''' % (field, escape_javascript_string(value, escape_for_html=False))
elif element_type == "H":
text = "<input type=\"hidden\" name=\"%s\" value=\"%s\">" % (field, val and escape_javascript_string(val, escape_quote_for_html=True) or '')
text = text + '''<script type="text/javascript">/*<![CDATA[*/
document.forms[0].%s.value="%s";
/*]]>*/</script>''' % (field, escape_javascript_string(value, escape_for_html=False))
elif element_type == "S":
values = re.split("[\n\r]+", value)
text = fidesc
if re.search("%s\[\]" % field, fidesc):
multipletext = "[]"
else:
multipletext = ""
if len(values) > 0 and not(len(values) == 1 and values[0] == ""):
text += '<script type="text/javascript">/*<![CDATA[*/\n'
text += "var i = 0;\n"
text += "el = document.forms[0].elements['%s%s'];\n" % (field, multipletext)
text += "max = el.length;\n"
for val in values:
text += "var found = 0;\n"
text += "var i=0;\n"
text += "while (i != max) {\n"
text += " if (el.options[i].value == \"%s\" || el.options[i].text == \"%s\") {\n" % \
(escape_javascript_string(val, escape_for_html=False), escape_javascript_string(val, escape_for_html=False))
text += " el.options[i].selected = true;\n"
text += " found = 1;\n"
text += " }\n"
text += " i=i+1;\n"
text += "}\n"
#text += "if (found == 0) {\n"
#text += " el[el.length] = new Option(\"%s\", \"%s\", 1,1);\n"
#text += "}\n"
text += "/*]]>*/</script>\n"
elif element_type == "D":
text = fidesc
elif element_type == "R":
try:
co = compile(fidesc.replace("\r\n", "\n"), "<string>", "exec")
## Note this exec is safe WRT global variable because the
## Create_Modify_Interface has already been parsed by
## execfile within a protected environment.
the_globals['text'] = ''
exec co in the_globals
text = the_globals['text']
except:
msg = "Error in evaluating response element %s with globals %s" % (pprint.pformat(field), pprint.pformat(globals()))
register_exception(req=None, alert_admin=True, prefix=msg)
raise InvenioWebSubmitFunctionError(msg)
else:
text = "%s: unknown field type" % field
t = t + "<small>%s</small>" % text
# output our flag field
t += '<input type="hidden" name="Create_Modify_Interface_DONE" value="DONE\n" />'
t += '<br />'
if end_button_prefix:
t += end_button_prefix % the_globals
# output some more text
t += "<br /><CENTER><small><INPUT type=\"button\" width=400 height=50 name=\"End\" value=\"%(end_button_label)s\" onClick=\"document.forms[0].step.value = 2;user_must_confirm_before_leaving_page = false;document.forms[0].submit();\"></small></CENTER></H4>" % {'end_button_label': escape_javascript_string(_(end_button_label), escape_quote_for_html=True)}
if suffix:
t += suffix % the_globals
return t
| gpl-2.0 | -4,709,867,277,222,334,000 | 46.408955 | 358 | 0.590102 | false |
HydrelioxGitHub/home-assistant | homeassistant/components/mailbox/__init__.py | 3 | 8027 | """Support for Voice mailboxes."""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
DOMAIN = 'mailbox'
EVENT = 'mailbox_updated'
CONTENT_TYPE_MPEG = 'audio/mpeg'
CONTENT_TYPE_NONE = 'none'
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
await hass.components.frontend.async_register_built_in_panel(
'mailbox', 'mailbox', 'mdi:mailbox')
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, 'async_get_handler'):
mailbox = await \
platform.async_get_handler(hass, p_config, discovery_info)
elif hasattr(platform, 'get_handler'):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error(
"Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
await component.async_add_entities([mailbox_entity])
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
await asyncio.wait(setup_tasks, loop=hass.loop)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
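# Minimal sketch of a platform mailbox (hypothetical, for illustration only; a real
# platform returns its own message dicts and media blobs):
#
#     class DemoMailbox(Mailbox):
#         @property
#         def media_type(self):
#             return CONTENT_TYPE_NONE
#
#         async def async_get_messages(self):
#             return []  # a real platform returns its message dicts here
#
# A platform module then hands an instance back from
# async_get_handler(hass, config, discovery_info) or get_handler(...).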
class StreamError(Exception):
"""Media streaming exception."""
pass
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request):
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
'name': mailbox.name,
'has_media': mailbox.has_media,
'can_delete': mailbox.can_delete
})
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
hass = request.app['hass']
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10, loop=hass.loop):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=500)
if stream:
return web.Response(body=stream,
content_type=mailbox.media_type)
return web.Response(status=500)
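# The views above expose a small REST surface (paths taken from their url attributes):
#   GET    /api/mailbox/platforms                 -> list of platforms
#   GET    /api/mailbox/messages/{platform}       -> message list
#   GET    /api/mailbox/media/{platform}/{msgid}  -> media blob (mailbox.media_type)
#   DELETE /api/mailbox/delete/{platform}/{msgid} -> delete a message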
| apache-2.0 | -7,247,475,546,684,341,000 | 30.112403 | 79 | 0.626012 | false |
mattcongy/itshop | docker-images/taigav2/taiga-back/taiga/projects/notifications/permissions.py | 2 | 1306 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, IsAuthenticated)
class NotifyPolicyPermission(TaigaResourcePermission):
retrieve_perms = IsAuthenticated()
create_perms = IsAuthenticated()
update_perms = IsAuthenticated()
partial_update_perms = IsAuthenticated()
destroy_perms = IsAuthenticated()
list_perms = IsAuthenticated()
| mit | -6,060,777,454,152,080,000 | 45.571429 | 81 | 0.760736 | false |
reddit/cabot | cabot/cabotapp/tasks.py | 13 | 3908 | import os
import os.path
import sys
import random
import logging
from celery import Celery
from celery._state import set_default_app
from celery.task import task
from django.conf import settings
from django.utils import timezone
celery = Celery(__name__)
celery.config_from_object(settings)
# Celery should set this app as the default, however the 'celery.current_app'
# api uses threadlocals, so code running in different threads/greenlets uses
# the fallback default instead of this app when no app is specified. This
# causes confusing connection errors when celery tries to connect to a
# non-existent rabbitmq server. It seems to happen mostly when using the
# 'celery.canvas' api. To get around this, we use the internal 'celery._state'
# api to force our app to be the default.
set_default_app(celery)
logger = logging.getLogger(__name__)
@task(ignore_result=True)
def run_status_check(check_or_id):
from .models import StatusCheck
if not isinstance(check_or_id, StatusCheck):
check = StatusCheck.objects.get(id=check_or_id)
else:
check = check_or_id
# This will call the subclass method
check.run()
@task(ignore_result=True)
def run_all_checks():
from .models import StatusCheck
from datetime import timedelta, datetime
checks = StatusCheck.objects.all()
seconds = range(60)
for check in checks:
if check.last_run:
next_schedule = check.last_run + timedelta(minutes=check.frequency)
if (not check.last_run) or timezone.now() > next_schedule:
delay = random.choice(seconds)
logger.debug('Scheduling task for %s seconds from now' % delay)
run_status_check.apply_async((check.id,), countdown=delay)
@task(ignore_result=True)
def update_services(ignore_result=True):
# Avoid importerrors and the like from legacy scheduling
return
@task(ignore_result=True)
def update_service(service_or_id):
from .models import Service
if not isinstance(service_or_id, Service):
service = Service.objects.get(id=service_or_id)
else:
service = service_or_id
service.update_status()
@task(ignore_result=True)
def update_instance(instance_or_id):
from .models import Instance
if not isinstance(instance_or_id, Instance):
instance = Instance.objects.get(id=instance_or_id)
else:
instance = instance_or_id
instance.update_status()
@task(ignore_result=True)
def update_shifts():
from .models import update_shifts as _update_shifts
_update_shifts()
@task(ignore_result=True)
def clean_db(days_to_retain=60):
"""
    Clean up the database; otherwise it gets overwhelmed with StatusCheckResults.
    To loop over the remaining results, spawn new tasks so that each batch runs on a fresh db connection.
"""
from .models import StatusCheckResult, ServiceStatusSnapshot
from datetime import timedelta
to_discard_results = StatusCheckResult.objects.filter(time__lte=timezone.now()-timedelta(days=days_to_retain))
to_discard_snapshots = ServiceStatusSnapshot.objects.filter(time__lte=timezone.now()-timedelta(days=days_to_retain))
result_ids = to_discard_results.values_list('id', flat=True)[:100]
snapshot_ids = to_discard_snapshots.values_list('id', flat=True)[:100]
if not result_ids:
logger.info('Completed deleting StatusCheckResult objects')
if not snapshot_ids:
logger.info('Completed deleting ServiceStatusSnapshot objects')
if (not snapshot_ids) and (not result_ids):
return
logger.info('Processing %s StatusCheckResult objects' % len(result_ids))
logger.info('Processing %s ServiceStatusSnapshot objects' % len(snapshot_ids))
StatusCheckResult.objects.filter(id__in=result_ids).delete()
ServiceStatusSnapshot.objects.filter(id__in=snapshot_ids).delete()
clean_db.apply_async(kwargs={'days_to_retain': days_to_retain}, countdown=3)
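# clean_db deletes at most 100 results and 100 snapshots per run and then re-schedules
# itself (countdown=3) until both querysets are empty, so one periodic trigger suffices.
# Hypothetical celerybeat entry (names illustrative; from celery.schedules import crontab):
#     CELERYBEAT_SCHEDULE['clean-db'] = {
#         'task': 'cabot.cabotapp.tasks.clean_db',
#         'schedule': crontab(hour=0, minute=0),
#     }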
| mit | 8,869,872,801,440,219,000 | 32.689655 | 120 | 0.719294 | false |
mingit/mstcp | arch/sim/generate-linker-script.py | 5 | 1254 | #!/usr/bin/env python
import re
def linker_script(reading, writing):
delim = re.compile('^==')
end_of_ro = re.compile('^ *.gcc_except_table[^:]*:[ ]*ONLY_IF_RW')
skipping = True
for line in reading.readlines():
if delim.search (line) is not None:
if skipping:
skipping = False
continue
else:
skipping = True
if skipping:
continue
m = end_of_ro.search(line)
if m is not None:
skipping = False
initcall = """
/* taken from kernel script*/
. = ALIGN (CONSTANT (MAXPAGESIZE));
.initcall.init : AT(ADDR(.initcall.init)) {
__initcall_start = .;
*(.initcall0.init)
*(.initcall0s.init)
*(.initcall1.init)
*(.initcall1s.init)
*(.initcall2.init)
*(.initcall2s.init)
*(.initcall3.init)
*(.initcall3s.init)
*(.initcall4.init)
*(.initcall4s.init)
*(.initcall5.init)
*(.initcall5s.init)
*(.initcall6.init)
*(.initcall6s.init)
*(.initcall7.init)
*(.initcall7s.init)
__initcall_end = .;
}
"""
writing.write (initcall)
writing.write(line)
import sys
linker_script (sys.stdin, sys.stdout)
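# Typical pipeline (illustrative): feed the script the default linker script that
# GNU ld prints, which is wrapped in "====..." delimiter lines, and capture the result:
#     ld --verbose | ./generate-linker-script.py > linker.lds
# The kernel-style .initcall.init section is spliced in just before the first
# read-write .gcc_except_table output rule.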
| gpl-2.0 | 4,640,045,848,494,772,000 | 24.591837 | 70 | 0.535885 | false |
rail/releasetasks | releasetasks/test/desktop/test_mark_as_shipped.py | 3 | 2603 | import unittest
from releasetasks.test.desktop import make_task_graph, do_common_assertions, \
get_task_by_name, create_firefox_test_args
from releasetasks.test import generate_scope_validator, PVT_KEY_FILE, verify
from voluptuous import Schema, truth
class TestMarkAsShipped(unittest.TestCase):
maxDiff = 30000
graph = None
task = None
human_task = None
payload = None
def setUp(self):
self.graph_schema = Schema({
'scopes': generate_scope_validator(scopes={"queue:task-priority:high"})
}, extra=True, required=True)
self.task_schema = Schema({
'task': {
'provisionerId': 'buildbot-bridge',
'workerType': 'buildbot-bridge',
'payload': {
'properties': {
'repo_path': 'releases/foo',
'script_repo_revision': 'abcd',
'next_version': '42.0b3',
}
}
}
}, extra=True, required=True)
self.human_task_schema = Schema({
'task': {
'provisionerId': 'null-provisioner',
'workerType': 'human-decision',
}
}, extra=True, required=True)
test_kwargs = create_firefox_test_args({
'bouncer_enabled': True,
'postrelease_mark_as_shipped_enabled': True,
'signing_pvt_key': PVT_KEY_FILE,
'final_verify_channels': ['foo'],
'release_channels': ['foo'],
'en_US_config': {
"platforms": {
"macosx64": {},
"win32": {},
"win64": {},
"linux": {},
"linux64": {},
}
},
})
self.graph = make_task_graph(**test_kwargs)
self.task = get_task_by_name(self.graph, "release-foo-firefox_mark_as_shipped")
self.human_task = get_task_by_name(self.graph, "publish_release_human_decision")
def generate_dependency_validator(self):
@truth
def validate_dependency(task):
return self.human_task['taskId'] in task['requires']
return validate_dependency
def test_common_assertions(self):
do_common_assertions(self.graph)
def test_task(self):
verify(self.task, self.task_schema, self.generate_dependency_validator())
def test_human_task(self):
verify(self.human_task, self.human_task_schema)
def test_graph(self):
verify(self.graph, self.graph_schema)
| mpl-2.0 | 1,252,419,546,497,769,500 | 31.5375 | 88 | 0.532847 | false |
MOA-2011/enigma2.pli4.0 | lib/python/Plugins/SystemPlugins/Videomode/plugin.py | 6 | 8967 | from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from Components.SystemInfo import SystemInfo
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, config, ConfigBoolean, ConfigNothing, ConfigSlider
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from VideoHardware import video_hw
config.misc.videowizardenabled = ConfigBoolean(default = True)
class VideoSetup(Screen, ConfigListScreen):
def __init__(self, session, hw):
Screen.__init__(self, session)
# for the skin: first try VideoSetup, then Setup, this allows individual skinning
self.skinName = ["VideoSetup", "Setup" ]
self.setup_title = _("A/V settings")
self.hw = hw
self.onChangedEntry = [ ]
# handle hotplug by re-creating setup
self.onShow.append(self.startHotplug)
self.onHide.append(self.stopHotplug)
self.list = [ ]
ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
from Components.ActionMap import ActionMap
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.apply,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["description"] = Label("")
self.createSetup()
self.grabLastGoodMode()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def startHotplug(self):
self.hw.on_hotplug.append(self.createSetup)
def stopHotplug(self):
self.hw.on_hotplug.remove(self.createSetup)
def createSetup(self):
level = config.usage.setup_level.index
self.list = [
getConfigListEntry(_("Video output"), config.av.videoport, _("Configures which video output connector will be used."))
]
# if we have modes for this port:
if config.av.videoport.value in config.av.videomode:
# add mode- and rate-selection:
self.list.append(getConfigListEntry(pgettext("Video output mode", "Mode"), config.av.videomode[config.av.videoport.value], _("Configure the video output mode (or resolution).")))
if config.av.videomode[config.av.videoport.value].value == 'PC':
self.list.append(getConfigListEntry(_("Resolution"), config.av.videorate[config.av.videomode[config.av.videoport.value].value], _("Configure the screen resolution in PC output mode.")))
else:
self.list.append(getConfigListEntry(_("Refresh rate"), config.av.videorate[config.av.videomode[config.av.videoport.value].value], _("Configure the refresh rate of the screen.")))
port = config.av.videoport.value
if port not in config.av.videomode:
mode = None
else:
mode = config.av.videomode[port].value
# some modes (720p, 1080i) are always widescreen. Don't let the user select something here, "auto" is not what he wants.
force_wide = self.hw.isWidescreenMode(port, mode)
if not force_wide:
self.list.append(getConfigListEntry(_("Aspect ratio"), config.av.aspect, _("Configure the aspect ratio of the screen.")))
if force_wide or config.av.aspect.value in ("16_9", "16_10"):
self.list.extend((
getConfigListEntry(_("Display 4:3 content as"), config.av.policy_43, _("When the content has an aspect ratio of 4:3, choose whether to scale/stretch the picture.")),
getConfigListEntry(_("Display >16:9 content as"), config.av.policy_169, _("When the content has an aspect ratio of 16:9, choose whether to scale/stretch the picture."))
))
elif config.av.aspect.value == "4_3":
self.list.append(getConfigListEntry(_("Display 16:9 content as"), config.av.policy_169, _("When the content has an aspect ratio of 16:9, choose whether to scale/stretch the picture.")))
# if config.av.videoport.value == "DVI":
# self.list.append(getConfigListEntry(_("Allow Unsupported Modes"), config.av.edid_override))
if config.av.videoport.value == "Scart":
self.list.append(getConfigListEntry(_("Color format"), config.av.colorformat, _("Configure which color format should be used on the SCART output.")))
if level >= 1:
self.list.append(getConfigListEntry(_("WSS on 4:3"), config.av.wss, _("When enabled, content with an aspect ratio of 4:3 will be stretched to fit the screen.")))
if SystemInfo["ScartSwitch"]:
self.list.append(getConfigListEntry(_("Auto scart switching"), config.av.vcrswitch, _("When enabled, your receiver will detect activity on the VCR SCART input.")))
if level >= 1:
if SystemInfo["CanDownmixAC3"]:
self.list.append(getConfigListEntry(_("AC3/DTS downmix"), config.av.downmix_ac3, _("Configure whether multi channel sound tracks should be downmixed to stereo.")))
self.list.extend((
getConfigListEntry(_("General AC3 delay"), config.av.generalAC3delay, _("Configure the general audio delay of Dolby Digital sound tracks.")),
getConfigListEntry(_("General PCM delay"), config.av.generalPCMdelay, _("Configure the general audio delay of stereo sound tracks."))
))
if SystemInfo["CanChangeOsdAlpha"]:
self.list.append(getConfigListEntry(_("OSD transparency"), config.av.osd_alpha, _("Configure the transparency of the OSD.")))
if not isinstance(config.av.scaler_sharpness, ConfigNothing):
self.list.append(getConfigListEntry(_("Scaler sharpness"), config.av.scaler_sharpness, _("Configure the sharpness of the video scaling.")))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.createSetup()
def confirm(self, confirmed):
if not confirmed:
config.av.videoport.value = self.last_good[0]
config.av.videomode[self.last_good[0]].value = self.last_good[1]
config.av.videorate[self.last_good[1]].value = self.last_good[2]
self.hw.setMode(*self.last_good)
else:
self.keySave()
def grabLastGoodMode(self):
port = config.av.videoport.value
mode = config.av.videomode[port].value
rate = config.av.videorate[mode].value
self.last_good = (port, mode, rate)
def apply(self):
port = config.av.videoport.value
mode = config.av.videomode[port].value
rate = config.av.videorate[mode].value
if (port, mode, rate) != self.last_good:
self.hw.setMode(port, mode, rate)
from Screens.MessageBox import MessageBox
self.session.openWithCallback(self.confirm, MessageBox, _("Is this video mode ok?"), MessageBox.TYPE_YESNO, timeout = 20, default = False)
else:
self.keySave()
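        # If the MessageBox times out (20s) or the user answers "no", confirm() rolls
        # the port/mode/rate back to the last known-good combination grabbed earlier.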
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def getCurrentDescription(self):
return self["config"].getCurrent() and len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2] or ""
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class VideomodeHotplug:
def __init__(self, hw):
self.hw = hw
def start(self):
self.hw.on_hotplug.append(self.hotplug)
def stop(self):
self.hw.on_hotplug.remove(self.hotplug)
def hotplug(self, what):
print "hotplug detected on port '%s'" % (what)
port = config.av.videoport.value
mode = config.av.videomode[port].value
rate = config.av.videorate[mode].value
if not self.hw.isModeAvailable(port, mode, rate):
print "mode %s/%s/%s went away!" % (port, mode, rate)
modelist = self.hw.getModeList(port)
if not len(modelist):
print "sorry, no other mode is available (unplug?). Doing nothing."
return
mode = modelist[0][0]
rate = modelist[0][1]
print "setting %s/%s/%s" % (port, mode, rate)
self.hw.setMode(port, mode, rate)
hotplug = None
def startHotplug():
global hotplug, video_hw
hotplug = VideomodeHotplug(video_hw)
hotplug.start()
def stopHotplug():
global hotplug
hotplug.stop()
def autostart(reason, session = None, **kwargs):
if session is not None:
global my_global_session
my_global_session = session
return
if reason == 0:
startHotplug()
elif reason == 1:
stopHotplug()
def videoSetupMain(session, **kwargs):
session.open(VideoSetup, video_hw)
def startSetup(menuid):
if menuid != "system":
return [ ]
return [(_("A/V settings"), videoSetupMain, "av_setup", 40)]
def VideoWizard(*args, **kwargs):
from VideoWizard import VideoWizard
return VideoWizard(*args, **kwargs)
def Plugins(**kwargs):
list = [
# PluginDescriptor(where = [PluginDescriptor.WHERE_SESSIONSTART, PluginDescriptor.WHERE_AUTOSTART], fnc = autostart),
PluginDescriptor(name=_("Video setup"), description=_("Advanced video setup"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=startSetup)
]
if config.misc.videowizardenabled.value:
list.append(PluginDescriptor(name=_("Video wizard"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = False, fnc=(0, VideoWizard)))
return list
| gpl-2.0 | 6,398,492,496,926,176,000 | 36.518828 | 189 | 0.721535 | false |
sinisterchipmunk/tomato | ext/tomato/external/scons/engine/SCons/Tool/CVS.py | 34 | 2910 | """SCons.Tool.CVS.py
Tool-specific initialization for CVS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/CVS.py 5023 2010/06/14 22:05:46 scons"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
CVS to an Environment."""
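    # Illustrative SConstruct usage of the factory installed below (repository path is an example only):
    #   env = Environment(tools=['default', 'CVS'])
    #   env.SourceCode('.', env.CVS('/usr/local/CVSROOT', 'mymodule'))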
def CVSFactory(repos, module='', env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The CVS() factory is deprecated and there is no replacement.""")
# fail if repos is not an absolute path name?
if module != '':
# Don't use os.path.join() because the name we fetch might
# be across a network and must use POSIX slashes as separators.
module = module + '/'
env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS -d ${TARGET.dir} $CVSMODULE${TARGET.posix}'
act = SCons.Action.Action('$CVSCOM', '$CVSCOMSTR')
return SCons.Builder.Builder(action = act,
env = env,
CVSREPOSITORY = repos,
CVSMODULE = module)
#setattr(env, 'CVS', CVSFactory)
env.CVS = CVSFactory
env['CVS'] = 'cvs'
env['CVSFLAGS'] = SCons.Util.CLVar('-d $CVSREPOSITORY')
env['CVSCOFLAGS'] = SCons.Util.CLVar('')
env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS ${TARGET.posix}'
def exists(env):
return env.Detect('cvs')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 1,930,460,354,458,270,500 | 38.863014 | 113 | 0.675945 | false |
Storyyeller/Krakatau | Krakatau/assembler/disassembly.py | 1 | 32021 | from __future__ import print_function
import collections
import math
import re
from ..classfileformat import classdata, mutf8
from ..classfileformat.reader import Reader, TruncatedStreamError
from ..util.thunk import thunk
from . import codes, token_regexes
from . import flags
from .instructions import OPNAMES, OP_CLS, OP_FMIM, OP_LBL, OP_NONE, OP_SHORT
MAX_INLINE_SIZE = 300
MAX_INDENT = 20
WORD_REGEX = re.compile(token_regexes.WORD + r'\Z')
PREFIXES = {'Utf8': 'u', 'Class': 'c', 'String': 's', 'Field': 'f', 'Method': 'm', 'InterfaceMethod': 'im', 'NameAndType': 'nat', 'MethodHandle': 'mh', 'MethodType': 'mt', 'Dynamic': 'dc', 'InvokeDynamic': 'id'}
class DisassemblyError(Exception):
pass
def reprbytes(b):
# repr will have b in python3 but not python2
return 'b' + repr(b).lstrip('b')
def isword(s):
try:
s = s.decode('ascii')
except UnicodeDecodeError:
return False
return WORD_REGEX.match(s) and s not in flags.FLAGS
def format_string(s):
try:
u = mutf8.decode(s)
except UnicodeDecodeError:
print('Warning, invalid utf8 data!')
else:
if mutf8.encode(u) == s:
return repr(u).lstrip('u')
return reprbytes(s)
def make_signed(x, bits):
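    # Reinterprets an unsigned value as two's complement,
    # e.g. make_signed(0xFF, 8) == -1 and make_signed(0x7F, 8) == 127.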
if x >= (1 << (bits - 1)):
x -= 1 << bits
return x
class StackMapReader(object):
def __init__(self):
self.stream = None
self.tag = -1
self.pos = -1
self.count = 0
self.valid = True
def setdata(self, r):
if self.stream is None:
self.stream = r
self.count = self.u16() + 1
self.parseNextPos()
else:
# Multiple StackMapTable attributes in same Code attribute
self.valid = False
def parseNextPos(self):
self.count -= 1
if self.count > 0:
self.tag = tag = self.u8()
if tag <= 127: # same and stack_1
delta = tag % 64
else: # everything else has 16bit delta field
delta = self.u16()
self.pos += delta + 1
def u8(self):
try:
return self.stream.u8()
except TruncatedStreamError:
self.valid = False
return 0
def u16(self):
try:
return self.stream.u16()
except TruncatedStreamError:
self.valid = False
return 0
class ReferencePrinter(object):
def __init__(self, clsdata, roundtrip):
self.roundtrip = roundtrip
self.cpslots = clsdata.pool.slots
for attr in clsdata.getattrs(b'BootstrapMethods'):
self.bsslots = classdata.BootstrapMethodsData(attr.stream()).slots
break
else:
self.bsslots = []
# CP index 0 should always be a raw reference. Additionally, there is one case where exact
# references are significant due to a bug in the JVM. In the InnerClasses attribute,
# specifying the same index for inner and outer class will fail verification, but specifying
# different indexes which point to identical class entries will pass. In this case, we force
# references to those indexes to be raw, so they don't get merged and break the class.
self.forcedraw = set()
for attr in clsdata.getattrs(b'InnerClasses'):
r = attr.stream()
for _ in range(r.u16()):
inner, outer, _, _ = r.u16(), r.u16(), r.u16(), r.u16()
if inner != outer and clsdata.pool.getclsutf(inner) == clsdata.pool.getclsutf(outer):
self.forcedraw.add(inner)
self.forcedraw.add(outer)
self.explicit_forcedraw = self.forcedraw.copy()
# For invalid cp indices, just output raw ref instead of throwing (including 0)
for i, slot in enumerate(self.cpslots):
if slot.tag is None:
self.forcedraw.add(i)
self.forcedraw.update(range(len(self.cpslots), 65536))
self.used = set()
self.encoded = {}
self.utfcounts = {}
def _float_or_double(self, x, nmbits, nebits, suffix, nanfmt):
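        # Decodes raw IEEE-754 bits into the assembler's textual form. For example, the float
        # bit pattern 0x3F800000 (nmbits=23, nebits=8) becomes '+1.0f' when roundtrip is off.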
nbits = nmbits + nebits + 1
assert nbits % 32 == 0
sbit, ebits, mbits = x >> (nbits - 1), (x >> nmbits) % (1 << nebits), x % (1 << nmbits)
if ebits == (1 << nebits) - 1:
result = 'NaN' if mbits else 'Infinity'
if self.roundtrip and mbits:
result += nanfmt.format(x)
elif ebits == 0 and mbits == 0:
result = '0.0'
else:
ebias = (1 << (nebits - 1)) - 1
exponent = ebits - ebias - nmbits
mantissa = mbits
if ebits > 0:
mantissa += 1 << nmbits
else:
exponent += 1
if self.roundtrip:
result = '0x{:X}p{}'.format(mantissa, exponent)
else:
result = repr(math.ldexp(mantissa, exponent))
return '+-'[sbit] + result + suffix
def _int(self, x): return str(make_signed(x, 32))
def _long(self, x): return str(make_signed(x, 64)) + 'L'
def _float(self, x): return self._float_or_double(x, 23, 8, 'f', '<0x{:08X}>')
def _double(self, x): return self._float_or_double(x, 52, 11, '', '<0x{:016X}>')
def _encode_utf(self, ind, wordok=True):
try:
return self.encoded[ind][wordok]
except KeyError:
s = self.cpslots[ind].data
string = format_string(s)
word = s.decode('utf8') if isword(s) else string
self.encoded[ind] = [string, word]
return word if wordok else string
def rawref(self, ind, isbs=False):
return '[{}{}]'.format('bs:' if isbs else '', ind)
def symref(self, ind, isbs=False):
self.used.add((ind, isbs))
if isbs:
return '[bs:_{}]'.format(ind)
prefix = PREFIXES.get(self.cpslots[ind].tag, '_')
return '[{}{}]'.format(prefix, ind)
def ref(self, ind, isbs=False):
if self.roundtrip or not isbs and ind in self.forcedraw:
return self.rawref(ind, isbs)
return self.symref(ind, isbs)
def _ident(self, ind, wordok=True):
if self.cpslots[ind].tag == 'Utf8':
val = self._encode_utf(ind, wordok=wordok)
if len(val) < MAX_INLINE_SIZE:
if len(val) < 50 or self.utfcounts.get(ind, 0) < 10:
self.utfcounts[ind] = 1 + self.utfcounts.get(ind, 0)
return val
def utfref(self, ind):
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
temp = self._ident(ind)
if temp is not None:
return temp
return self.symref(ind)
def clsref(self, ind, tag='Class'):
assert tag in 'Class Module Package'.split()
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
if self.cpslots[ind].tag == tag:
ind2 = self.cpslots[ind].refs[0]
temp = self._ident(ind2)
if temp is not None:
return temp
return self.symref(ind)
def natref(self, ind):
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
if self.cpslots[ind].tag == 'NameAndType':
ind2, ind3 = self.cpslots[ind].refs
temp = self._ident(ind2)
if temp is not None:
return temp + ' ' + self.utfref(ind3)
return self.symref(ind)
def fmimref(self, ind):
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
if self.cpslots[ind].tag in ['Field', 'Method', 'InterfaceMethod']:
ind2, ind3 = self.cpslots[ind].refs
return ' '.join([self.cpslots[ind].tag, self.clsref(ind2), self.natref(ind3)])
return self.symref(ind)
def mhnotref(self, ind):
slot = self.cpslots[ind]
return codes.handle_rcodes[slot.data] + ' ' + self.taggedref(slot.refs[0], allowed=['Field', 'Method', 'InterfaceMethod'])
def taggedconst(self, ind):
slot = self.cpslots[ind]
if slot.tag == 'Utf8':
parts = [self._encode_utf(ind)]
elif slot.tag == 'Int':
parts = [self._int(slot.data)]
elif slot.tag == 'Float':
parts = [self._float(slot.data)]
elif slot.tag == 'Long':
parts = [self._long(slot.data)]
elif slot.tag == 'Double':
parts = [self._double(slot.data)]
elif slot.tag in ['Class', 'String', 'MethodType', 'Module', 'Package']:
parts = [self.utfref(slot.refs[0])]
elif slot.tag in ['Field', 'Method', 'InterfaceMethod']:
parts = [self.clsref(slot.refs[0]), self.natref(slot.refs[1])]
elif slot.tag == 'NameAndType':
parts = [self.utfref(slot.refs[0]), self.utfref(slot.refs[1])]
elif slot.tag == 'MethodHandle':
parts = [self.mhnotref(ind)]
elif slot.tag in ['InvokeDynamic', 'Dynamic']:
parts = [self.bsref(slot.refs[0]), self.natref(slot.refs[1])]
parts.insert(0, slot.tag)
return ' '.join(parts)
def taggedref(self, ind, allowed=None):
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
if allowed is None or self.cpslots[ind].tag in allowed:
temp = self.taggedconst(ind)
if len(temp) < MAX_INLINE_SIZE:
return temp
return self.symref(ind)
def ldcrhs(self, ind):
if self.roundtrip or ind in self.forcedraw:
return self.rawref(ind)
slot = self.cpslots[ind]
t = slot.tag
if t == 'Int':
return self._int(slot.data)
elif slot.tag == 'Float':
return self._float(slot.data)
elif slot.tag == 'Long':
return self._long(slot.data)
elif slot.tag == 'Double':
return self._double(slot.data)
elif t == 'String':
ind2 = self.cpslots[ind].refs[0]
temp = self._ident(ind2, wordok=False)
if temp is not None:
return temp
return self.symref(ind)
return self.taggedref(ind, allowed=['Class', 'MethodHandle', 'MethodType', 'Dynamic'])
def bsnotref(self, ind, tagged=False):
slot = self.bsslots[ind]
parts = []
if tagged:
parts.append('Bootstrap')
if tagged and self.roundtrip:
parts.append(self.rawref(slot.refs[0]))
else:
parts.append(self.mhnotref(slot.refs[0]))
for bsarg in slot.refs[1:]:
# Force Dynamic arguments to be references in order to avoid infinite loops
parts.append(self.taggedref(bsarg, allowed=['Int', 'Long', 'Float', 'Double', 'String', 'Class', 'MethodHandle', 'MethodType']))
parts.append(':')
return ' '.join(parts)
def bsref(self, ind):
if self.roundtrip:
return self.rawref(ind, isbs=True)
return self.bsnotref(ind)
LabelInfos = collections.namedtuple('LabelInfos', 'defined used')
class Disassembler(object):
def __init__(self, clsdata, out, roundtrip):
self.roundtrip = roundtrip
self.out = out
self.cls = clsdata
self.pool = clsdata.pool
self.indentlevel = 0
self.labels = None
self.refprinter = ReferencePrinter(clsdata, roundtrip)
def _getattr(a, obj, name):
for attr in obj.attributes:
if a.pool.getutf(attr.name) == name:
return attr
def sol(a, text=''):
level = min(a.indentlevel, MAX_INDENT) * 4
text += ' ' * (level - len(text))
a.out(text)
def eol(a): a.out('\n')
def val(a, s): a.out(s + ' ')
def int(a, x): a.val(str(x))
def lbl(a, x):
a.labels.used.add(x)
a.val('L{}'.format(x))
def try_lbl(a, x):
if a.labels is None or x not in a.labels.defined:
raise DisassemblyError()
a.lbl(x)
###########################################################################
def extrablankline(a): a.eol()
def ref(a, ind, isbs=False): a.val(a.refprinter.ref(ind, isbs))
def utfref(a, ind): a.val(a.refprinter.utfref(ind))
def clsref(a, ind, tag='Class'): a.val(a.refprinter.clsref(ind, tag))
def natref(a, ind): a.val(a.refprinter.natref(ind))
def fmimref(a, ind): a.val(a.refprinter.fmimref(ind))
def taggedbs(a, ind): a.val(a.refprinter.bsnotref(ind, tagged=True))
def taggedconst(a, ind): a.val(a.refprinter.taggedconst(ind))
def taggedref(a, ind): a.val(a.refprinter.taggedref(ind))
def ldcrhs(a, ind): a.val(a.refprinter.ldcrhs(ind))
def flags(a, access, names):
for i in range(16):
if access & (1 << i):
a.val(names[1 << i])
###########################################################################
### Top level stuff (class, const defs, fields, methods) ##################
def disassemble(a):
cls = a.cls
a.val('.version'), a.int(cls.version[0]), a.int(cls.version[1]), a.eol()
a.val('.class'), a.flags(cls.access, flags.RFLAGS_CLASS), a.clsref(cls.this), a.eol()
a.val('.super'), a.clsref(cls.super), a.eol()
for ref in cls.interfaces:
a.val('.implements'), a.clsref(ref), a.eol()
for f in cls.fields:
a.field(f)
for m in cls.methods:
a.method(m)
for attr in cls.attributes:
a.attribute(attr)
a.constdefs()
a.val('.end class'), a.eol()
def field(a, f):
a.val('.field'), a.flags(f.access, flags.RFLAGS_FIELD), a.utfref(f.name), a.utfref(f.desc)
attrs = f.attributes[:]
cvattr = a._getattr(f, b'ConstantValue')
if cvattr and not a.roundtrip:
a.val('='), a.ldcrhs(cvattr.stream().u16())
attrs.remove(cvattr)
if attrs:
a.val('.fieldattributes'), a.eol()
a.indentlevel += 1
for attr in attrs:
a.attribute(attr)
a.indentlevel -= 1
a.val('.end fieldattributes')
a.eol()
def method(a, m):
a.extrablankline()
a.val('.method'), a.flags(m.access, flags.RFLAGS_METHOD), a.utfref(m.name), a.val(':'), a.utfref(m.desc), a.eol()
a.indentlevel += 1
for attr in m.attributes:
a.attribute(attr, in_method=True)
a.indentlevel -= 1
a.val('.end method'), a.eol()
def constdefs(a):
if a.roundtrip:
for ind in range(len(a.refprinter.cpslots)):
a.constdef(ind, False)
for ind in range(len(a.refprinter.bsslots)):
a.constdef(ind, True)
else:
assert not a.refprinter.used & a.refprinter.forcedraw
for ind in sorted(a.refprinter.explicit_forcedraw):
a.constdef(ind, False)
done = set()
while len(done) < len(a.refprinter.used):
for ind, isbs in sorted(a.refprinter.used - done):
a.constdef(ind, isbs)
done.add((ind, isbs))
def constdef(a, ind, isbs):
if not isbs and a.refprinter.cpslots[ind].tag is None:
return
a.sol(), a.val('.bootstrap' if isbs else '.const'), a.ref(ind, isbs), a.val('=')
if isbs:
a.taggedbs(ind)
else:
a.taggedconst(ind)
a.eol()
###########################################################################
### Bytecode ##############################################################
def code(a, r):
c = classdata.CodeData(r, a.pool, a.cls.version < (45, 3))
a.val('.code'), a.val('stack'), a.int(c.stack), a.val('locals'), a.int(c.locals), a.eol()
a.indentlevel += 1
assert a.labels is None
a.labels = LabelInfos(set(), set())
stackreader = StackMapReader()
for attr in c.attributes:
if a.pool.getutf(attr.name) == b'StackMapTable':
stackreader.setdata(attr.stream())
rexcepts = c.exceptions[::-1]
bcreader = Reader(c.bytecode)
while bcreader.size():
a.insline_start(bcreader.off, rexcepts, stackreader)
a.instruction(bcreader)
a.insline_start(bcreader.off, rexcepts, stackreader), a.eol()
badlbls = a.labels.used - a.labels.defined
if badlbls:
stackreader.valid = False
a.sol('; Labels used by invalid StackMapTable attribute'), a.eol()
for pos in sorted(badlbls):
a.sol('L{}'.format(pos) + ':'), a.eol()
if stackreader.stream and (stackreader.stream.size() or stackreader.count > 0):
stackreader.valid = False
if not stackreader.valid:
a.sol('.noimplicitstackmap'), a.eol()
for attr in c.attributes:
a.attribute(attr, use_raw_stackmap=not stackreader.valid)
a.labels = None
a.indentlevel -= 1
a.sol(), a.val('.end code')
def insline_start(a, pos, rexcepts, stackreader):
while rexcepts and rexcepts[-1].start <= pos:
e = rexcepts.pop()
a.sol(), a.val('.catch'), a.clsref(e.type), a.val('from'), a.lbl(e.start)
a.val('to'), a.lbl(e.end), a.val('using'), a.lbl(e.handler), a.eol()
if stackreader.count > 0 and stackreader.pos == pos:
r = stackreader
tag = stackreader.tag
a.extrablankline()
a.sol(), a.val('.stack')
if tag <= 63:
a.val('same')
elif tag <= 127:
a.val('stack_1'), a.verification_type(r)
elif tag == 247:
a.val('stack_1_extended'), a.verification_type(r)
elif tag < 251:
a.val('chop'), a.int(251 - tag)
elif tag == 251:
a.val('same_extended')
elif tag < 255:
a.val('append')
for _ in range(tag - 251):
a.verification_type(r)
else:
a.val('full')
a.indentlevel += 1
a.eol(), a.sol(), a.val('locals')
for _ in range(r.u16()):
a.verification_type(r)
a.eol(), a.sol(), a.val('stack')
for _ in range(r.u16()):
a.verification_type(r)
a.indentlevel -= 1
a.eol(), a.sol(), a.val('.end stack')
a.eol()
stackreader.parseNextPos()
a.sol('L{}'.format(pos) + ':')
a.labels.defined.add(pos)
def verification_type(a, r):
try:
tag = codes.vt_rcodes[r.u8()]
except IndexError:
r.valid = False
a.val('Top')
return
a.val(tag)
if tag == 'Object':
a.clsref(r.u16())
elif tag == 'Uninitialized':
a.lbl(r.u16())
def instruction(a, r):
pos = r.off
op = OPNAMES[r.u8()]
a.val(op)
if op in OP_LBL:
a.lbl(pos + (r.s32() if op.endswith('_w') else r.s16()))
elif op in OP_SHORT:
a.int(r.u8())
elif op in OP_CLS:
a.clsref(r.u16())
elif op in OP_FMIM:
a.fmimref(r.u16())
elif op == 'invokeinterface':
a.fmimref(r.u16()), a.int(r.u8()), r.u8()
elif op == 'invokedynamic':
a.taggedref(r.u16()), r.u16()
elif op in ['ldc', 'ldc_w', 'ldc2_w']:
a.ldcrhs(r.u8() if op == 'ldc' else r.u16())
elif op == 'multianewarray':
a.clsref(r.u16()), a.int(r.u8())
elif op == 'bipush':
a.int(r.s8())
elif op == 'sipush':
a.int(r.s16())
elif op == 'iinc':
a.int(r.u8()), a.int(r.s8())
elif op == 'wide':
op2 = OPNAMES[r.u8()]
a.val(op2), a.int(r.u16())
if op2 == 'iinc':
a.int(r.s16())
elif op == 'newarray':
a.val(codes.newarr_rcodes[r.u8()])
elif op == 'tableswitch':
r.getRaw((3-pos) % 4)
default = pos + r.s32()
low, high = r.s32(), r.s32()
a.int(low), a.eol()
a.indentlevel += 1
for _ in range(high - low + 1):
a.sol(), a.lbl(pos + r.s32()), a.eol()
a.sol(), a.val('default'), a.val(':'), a.lbl(default), a.eol()
a.indentlevel -= 1
elif op == 'lookupswitch':
r.getRaw((3-pos) % 4)
default = pos + r.s32()
a.eol()
a.indentlevel += 1
for _ in range(r.s32()):
a.sol(), a.int(r.s32()), a.val(':'), a.lbl(pos + r.s32()), a.eol()
a.sol(), a.val('default'), a.val(':'), a.lbl(default), a.eol()
a.indentlevel -= 1
else:
assert op in OP_NONE
a.eol()
###########################################################################
### Attributes ############################################################
def attribute(a, attr, in_method=False, use_raw_stackmap=False):
name = a.pool.getutf(attr.name)
if not a.roundtrip and name in (b'BootstrapMethods', b'StackMapTable'):
return
# a.extrablankline()
a.sol()
isnamed = False
if a.roundtrip or name is None:
isnamed = True
a.val('.attribute'), a.utfref(attr.name)
if attr.wronglength:
a.val('length'), a.int(attr.length)
if name == b'Code' and in_method:
a.code(attr.stream())
elif name == b'BootstrapMethods' and a.cls.version >= (51, 0):
a.val('.bootstrapmethods')
elif name == b'StackMapTable' and not use_raw_stackmap:
a.val('.stackmaptable')
elif a.attribute_fallible(name, attr):
pass
else:
print('Nonstandard attribute', name[:70], len(attr.raw))
if not isnamed:
a.val('.attribute'), a.utfref(attr.name)
a.val(reprbytes(attr.raw))
a.eol()
def attribute_fallible(a, name, attr):
        # Temporarily buffer output so we don't get partial output if attribute disassembly fails.
        # In case of failure, we fall back to binary output in the caller.
orig_out = a.out
buffer_ = []
a.out = buffer_.append
try:
r = attr.stream()
if name == b'AnnotationDefault':
a.val('.annotationdefault'), a.element_value(r)
elif name == b'ConstantValue':
a.val('.constantvalue'), a.ldcrhs(r.u16())
elif name == b'Deprecated':
a.val('.deprecated')
elif name == b'EnclosingMethod':
a.val('.enclosing method'), a.clsref(r.u16()), a.natref(r.u16())
elif name == b'Exceptions':
a.val('.exceptions')
for _ in range(r.u16()):
a.clsref(r.u16())
elif name == b'InnerClasses':
a.indented_line_list(r, a._innerclasses_item, 'innerclasses')
elif name == b'LineNumberTable':
a.indented_line_list(r, a._linenumber_item, 'linenumbertable')
elif name == b'LocalVariableTable':
a.indented_line_list(r, a._localvariabletable_item, 'localvariabletable')
elif name == b'LocalVariableTypeTable':
a.indented_line_list(r, a._localvariabletable_item, 'localvariabletypetable')
elif name == b'MethodParameters':
a.indented_line_list(r, a._methodparams_item, 'methodparameters', bytelen=True)
elif name == b'Module':
a.module_attr(r)
elif name == b'ModuleMainClass':
a.val('.modulemainclass'), a.clsref(r.u16())
elif name == b'ModulePackages':
a.val('.modulepackages')
for _ in range(r.u16()):
a.clsref(r.u16(), tag='Package')
elif name == b'NestHost':
a.val('.nesthost'), a.clsref(r.u16())
elif name == b'NestMembers':
a.val('.nestmembers')
for _ in range(r.u16()):
a.clsref(r.u16())
elif name == b'Record':
a.indented_line_list(r, a._record_component, 'record')
elif name in (b'RuntimeVisibleAnnotations', b'RuntimeVisibleParameterAnnotations',
b'RuntimeVisibleTypeAnnotations', b'RuntimeInvisibleAnnotations',
b'RuntimeInvisibleParameterAnnotations', b'RuntimeInvisibleTypeAnnotations'):
a.val('.runtime')
a.val('invisible' if b'Inv' in name else 'visible')
if b'Type' in name:
a.val('typeannotations'), a.eol()
a.indented_line_list(r, a.type_annotation_line, 'runtime', False)
elif b'Parameter' in name:
a.val('paramannotations'), a.eol()
a.indented_line_list(r, a.param_annotation_line, 'runtime', False, bytelen=True)
else:
a.val('annotations'), a.eol()
a.indented_line_list(r, a.annotation_line, 'runtime', False)
elif name == b'Signature':
a.val('.signature'), a.utfref(r.u16())
elif name == b'SourceDebugExtension':
a.val('.sourcedebugextension')
a.val(reprbytes(attr.raw))
elif name == b'SourceFile':
a.val('.sourcefile'), a.utfref(r.u16())
elif name == b'Synthetic':
a.val('.synthetic')
# check for extra data in the attribute
if r.size():
raise DisassemblyError()
except (TruncatedStreamError, DisassemblyError):
a.out = orig_out
return False
a.out = orig_out
a.out(''.join(buffer_))
return True
def module_attr(a, r):
a.val('.module'), a.clsref(r.u16(), tag='Module'), a.flags(r.u16(), flags.RFLAGS_MOD_OTHER)
a.val('version'), a.utfref(r.u16()), a.eol()
a.indentlevel += 1
for _ in range(r.u16()):
a.sol(), a.val('.requires'), a.clsref(r.u16(), tag='Module'), a.flags(r.u16(), flags.RFLAGS_MOD_REQUIRES), a.val('version'), a.utfref(r.u16()), a.eol()
for dir_ in ('.exports', '.opens'):
for _ in range(r.u16()):
a.sol(), a.val(dir_), a.clsref(r.u16(), tag='Package'), a.flags(r.u16(), flags.RFLAGS_MOD_OTHER)
count = r.u16()
if count:
a.val('to')
for _ in range(count):
a.clsref(r.u16(), tag='Module')
a.eol()
for _ in range(r.u16()):
a.sol(), a.val('.uses'), a.clsref(r.u16()), a.eol()
for _ in range(r.u16()):
a.sol(), a.val('.provides'), a.clsref(r.u16()), a.val('with')
for _ in range(r.u16()):
a.clsref(r.u16())
a.eol()
a.indentlevel -= 1
a.sol(), a.val('.end module')
def indented_line_list(a, r, cb, dirname, dostart=True, bytelen=False):
if dostart:
a.val('.' + dirname), a.eol()
a.indentlevel += 1
for _ in range(r.u8() if bytelen else r.u16()):
a.sol(), cb(r), a.eol()
a.indentlevel -= 1
if dirname is not None:
a.sol(), a.val('.end ' + dirname)
def _innerclasses_item(a, r): a.clsref(r.u16()), a.clsref(r.u16()), a.utfref(r.u16()), a.flags(r.u16(), flags.RFLAGS_CLASS)
def _linenumber_item(a, r): a.try_lbl(r.u16()), a.int(r.u16())
def _localvariabletable_item(a, r):
start, length, name, desc, ind = r.u16(), r.u16(), r.u16(), r.u16(), r.u16()
a.int(ind), a.val('is'), a.utfref(name), a.utfref(desc),
a.val('from'), a.try_lbl(start), a.val('to'), a.try_lbl(start + length)
def _methodparams_item(a, r): a.utfref(r.u16()), a.flags(r.u16(), flags.RFLAGS_MOD_OTHER)
def _record_component(a, r):
a.utfref(r.u16()), a.utfref(r.u16())
attrs = [classdata.AttributeData(r) for _ in range(r.u16())]
if attrs:
a.val('.attributes'), a.eol()
a.indentlevel += 1
for attr in attrs:
a.attribute(attr)
a.indentlevel -= 1
a.sol(), a.val('.end attributes')
###########################################################################
### Annotations ###########################################################
def annotation_line(a, r):
a.val('.annotation'), a.annotation_contents(r), a.sol(), a.val('.end'), a.val('annotation')
def param_annotation_line(a, r):
a.indented_line_list(r, a.annotation_line, 'paramannotation')
def type_annotation_line(a, r):
a.val('.typeannotation')
a.indentlevel += 1
a.ta_target_info(r) # Note: begins on same line as .typeannotation
a.ta_target_path(r)
a.sol(), a.annotation_contents(r),
a.indentlevel -= 1
a.sol(), a.val('.end'), a.val('typeannotation')
def ta_target_info(a, r):
tag = r.u8()
a.int(tag)
if tag <= 0x01:
a.val('typeparam'), a.int(r.u8())
elif tag <= 0x10:
a.val('super'), a.int(r.u16())
elif tag <= 0x12:
a.val('typeparambound'), a.int(r.u8()), a.int(r.u8())
elif tag <= 0x15:
a.val('empty')
elif tag <= 0x16:
a.val('methodparam'), a.int(r.u8())
elif tag <= 0x17:
a.val('throws'), a.int(r.u16())
elif tag <= 0x41:
a.val('localvar'), a.eol()
a.indented_line_list(r, a._localvarrange, 'localvar', False)
elif tag <= 0x42:
a.val('catch'), a.int(r.u16())
elif tag <= 0x46:
a.val('offset'), a.try_lbl(r.u16())
else:
a.val('typearg'), a.try_lbl(r.u16()), a.int(r.u8())
a.eol()
def _localvarrange(a, r):
start, length, index = r.u16(), r.u16(), r.u16()
if start == length == 0xFFFF: # WTF, Java?
a.val('nowhere')
else:
a.val('from'), a.try_lbl(start), a.val('to'), a.try_lbl(start + length)
a.int(index)
def ta_target_path(a, r):
a.sol(), a.indented_line_list(r, a._type_path_segment, 'typepath', bytelen=True), a.eol()
def _type_path_segment(a, r):
a.int(r.u8()), a.int(r.u8())
# The following are recursive and can be nested arbitrarily deep,
# so we use generators and a thunk to avoid the Python stack limit.
def element_value(a, r): thunk(a._element_value(r))
def annotation_contents(a, r): thunk(a._annotation_contents(r))
def _element_value(a, r):
tag = codes.et_rtags.get(r.u8())
if tag is None:
raise DisassemblyError()
a.val(tag)
if tag == 'annotation':
(yield a._annotation_contents(r)), a.sol(), a.val('.end'), a.val('annotation')
elif tag == 'array':
a.eol()
a.indentlevel += 1
for _ in range(r.u16()):
a.sol(), (yield a._element_value(r)), a.eol()
a.indentlevel -= 1
a.sol(), a.val('.end'), a.val('array')
elif tag == 'enum':
a.utfref(r.u16()), a.utfref(r.u16())
elif tag == 'class' or tag == 'string':
a.utfref(r.u16())
else:
a.ldcrhs(r.u16())
def _annotation_contents(a, r):
a.utfref(r.u16()), a.eol()
a.indentlevel += 1
for _ in range(r.u16()):
a.sol(), a.utfref(r.u16()), a.val('='), (yield a._element_value(r)), a.eol()
a.indentlevel -= 1
| gpl-3.0 | -7,258,875,840,090,898,000 | 36.104287 | 211 | 0.51307 | false |
AWPorter/aima-python | submissions/Dickenson/myLogic.py | 17 | 2054 | evolution = {
'kb': '''
Kingdom(Animalia, Chordata)
Phylum(Chordata, Reptilia)
Phylum(Chordata, Aves)
Class(Reptilia, Squamata)
Class(Reptilia, Crocodilia)
Order(Squamata, Serpentes)
Order(Squamata,Scincomorpha)
Suborder(Serpentes, GarterSnake)
Suborder(Serpentes, ReticulatedPython)
Suborder(Serpentes, BlueLippedSeaKrait)
Suborder(Scincomorpha, CommonGreySkink)
Suborder(Scincomorpha, RainbowSkink)
Suborder(Scincomorpha, NorthernBlueTongueSkink)
Order(Crocodilia, Crocodylidae)
Order(Crocodilia, Alligatoridae)
Suborder(Crocodylidae, SaltwaterCrocodile)
Suborder(Crocodylidae, NileCrocodile)
Suborder(Alligatoridae, AmericanAlligator)
Class(Aves, Anseriformes)
Order(Anseriformes, Anatidae)
Order(Anseriformes, Anseranatidae)
Suborder(Anatidae, BlackBelliedWhistlingDuck)
Suborder(Anseranatidae, MagpieGoose)
Suborder(w, x) & Suborder(w, y) ==> EvolutionarySibling(x, y)
#Can't find a way to express ~EvolutionarySibling(p, q); that would make the cousin rule more accurate. As it is, that rule
#also generates all siblings (similar to how the sibling rule also matches a species with itself). The same holds for the
#deeper rules below, which also include every shallower level. Therefore each such rule is prefaced with "AtLeast": it returns
#not only the species at that relationship level but also every level shallower.
Order(w, x) & Order(w, y) & Suborder(x, q) & Suborder(y, p) ==> AtLeastEvolutionaryCousin(q, p)
Class(w, x) & Class(w, y) & Order(x, l) & Order(y, f) & Suborder(l, q) & Suborder(f, p) ==> AtLeastEvolutionaryTwiceRemoved(q, p)
Phylum(w, o) & Phylum(w, j) & Class(o, x) & Class(j, y) & Order(x, l) & Order(y, f) & Suborder(l, q) & Suborder(f, p) ==> AtLeastDistantRelatives(q, p)
''',
'queries':'''
EvolutionarySibling(GarterSnake, y)
AtLeastEvolutionaryCousin(q, SaltwaterCrocodile)
AtLeastEvolutionaryTwiceRemoved(CommonGreySkink, p)
AtLeastDistantRelatives(MagpieGoose, p)
EvolutionarySibling(x, y)
''',
'limit': 100,
}
Examples = {
'evolution': evolution,
} | mit | -3,969,997,913,297,517,600 | 39.294118 | 151 | 0.729309 | false |
MarkusHackspacher/unknown-horizons | horizons/util/yamlcache.py | 1 | 5061 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
import logging
import os
import threading
from typing import Optional
import yaml
from horizons.constants import BUILDINGS, PATHS, RES, TIER, UNITS
from horizons.util.yamlcachestorage import YamlCacheStorage
try:
from yaml import CSafeLoader as SafeLoader # type: ignore
except ImportError:
from yaml import SafeLoader # type: ignore
# make SafeLoader allow unicode
def construct_yaml_str(self, node):
return self.construct_scalar(node)
SafeLoader.add_constructor('tag:yaml.org,2002:python/unicode', construct_yaml_str)
SafeLoader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
def parse_token(token, token_klass):
"""Helper function that tries to parse a constant name.
Does not do error detection, but passes unparseable stuff through.
Allowed values: integer or token_klass.LIKE_IN_CONSTANTS
@param token_klass: "TIER", "RES", "UNITS" or "BUILDINGS"
"""
classes = {'TIER': TIER, 'RES': RES, 'UNITS': UNITS, 'BUILDINGS': BUILDINGS}
if not isinstance(token, str):
# Probably numeric already
return token
if not token.startswith(token_klass):
# No need to parse anything
return token
try:
return getattr(classes[token_klass], token.split(".", 2)[1])
except AttributeError as e: # token not defined here
err = ("This means that you either have to add an entry in horizons/constants.py "
"in the class {0!s} for {1!s},\nor {2!s} is actually a typo.".
format(token_klass, token, token))
raise Exception(str(e) + "\n\n" + err + "\n")
def convert_game_data(data):
"""Translates convenience symbols into actual game data usable by machines"""
if isinstance(data, dict):
return dict([convert_game_data(i) for i in data.items()])
elif isinstance(data, (tuple, list)):
return type(data)((convert_game_data(i) for i in data))
else: # leaf
data = parse_token(data, "TIER")
data = parse_token(data, "RES")
data = parse_token(data, "UNITS")
data = parse_token(data, "BUILDINGS")
return data
class YamlCache:
"""Loads and caches YAML files in a persistent cache.
Threadsafe.
Use get_file for files to cache (default case) or load_yaml_data for special use cases (behaves like yaml.load).
"""
cache = None # type: Optional[YamlCacheStorage]
cache_filename = os.path.join(PATHS.CACHE_DIR, 'yamldata.cache')
sync_scheduled = False
lock = threading.Lock()
log = logging.getLogger("yamlcache")
@classmethod
def load_yaml_data(cls, string_or_stream):
"""Use this instead of yaml.load everywhere in uh in case get_file isn't useable"""
return yaml.load(string_or_stream, Loader=SafeLoader)
@classmethod
def get_file(cls, filename, game_data=False):
"""Get contents of a yaml file
@param filename: path to the file
@param game_data: Whether this file contains data like BUILDINGS.LUMBERJACK to resolve
"""
with open(filename, 'r', encoding="utf-8") as f:
filedata = f.read()
# calc the hash
h = hash(filedata)
# check for updates or new files
if cls.cache is None:
cls._open_cache()
yaml_file_in_cache = (filename in cls.cache and cls.cache[filename][0] == h)
if not yaml_file_in_cache:
data = cls.load_yaml_data(filedata)
if game_data: # need to convert some values
try:
data = convert_game_data(data)
except Exception as e:
# add info about file
to_add = "\nThis error happened in {0!s} .".format(filename)
e.args = (e.args[0] + to_add, ) + e.args[1:]
					e.message = getattr(e, 'message', '') + to_add  # BaseException has no 'message' attribute in Python 3
raise
cls.lock.acquire()
cls.cache[filename] = (h, data)
if not cls.sync_scheduled:
cls.sync_scheduled = True
from horizons.extscheduler import ExtScheduler
ExtScheduler().add_new_object(cls._do_sync, cls, run_in=1)
cls.lock.release()
return cls.cache[filename][1] # returns an object from the YAML
@classmethod
def _open_cache(cls):
cls.lock.acquire()
cls.cache = YamlCacheStorage.open(cls.cache_filename)
cls.lock.release()
@classmethod
def _do_sync(cls):
"""Only write to disc once in a while, it's too slow when done every time"""
cls.lock.acquire()
cls.sync_scheduled = False
cls.cache.sync()
cls.lock.release()
| gpl-2.0 | -3,035,042,797,824,739,300 | 31.442308 | 113 | 0.698676 | false |
PiRSquared17/stoqs | utils/MPQuery.py | 4 | 39035 | __author__ = 'Mike McCann'
__copyright__ = '2012'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
MeasuredParameter Query class for managing aspects of building requests for MeasuredParameter datavalues.
Intended to be used by utils/STOQSQManager.py for preventing multiple traversals of qs_mp and by
views/__init__.py to support query by parameter value for the REST responses.
This module (though called MPQuery) also contains an SPQuerySet class to handle the Sample portion of
the STOQS data model. Sample and Measurement are almost synonymous, especially with their relationships
to InstantPoint and SampledParameter/MeasuredParameter. The MPQuery class has machinery that checks
which ParameterGroup a Parameter belongs to so that the proper code is executed for a Sample or a
Measurement.
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from django.conf import settings
from django.db.models.query import REPR_OUTPUT_SIZE, RawQuerySet, ValuesQuerySet
from django.contrib.gis.db.models.query import GeoQuerySet
from django.db import DatabaseError
from datetime import datetime
from stoqs.models import MeasuredParameter, Parameter, SampledParameter, ParameterGroupParameter, MeasuredParameterResource
from utils import postgresifySQL, getGet_Actual_Count, getParameterGroups
from loaders import MEASUREDINSITU
from loaders.SampleLoaders import SAMPLED
from PQuery import PQuery
import logging
import pprint
import re
import locale
import time
import os
import tempfile
import sqlparse
logger = logging.getLogger(__name__)
ITER_HARD_LIMIT = 100000
class MPQuerySet(object):
'''
A class to simulate a GeoQuerySet that's suitable for use everywhere a GeoQuerySet may be used.
This special class supports adapting MeasuredParameter RawQuerySets to make them look like regular
GeoQuerySets. See: http://ramenlabs.com/2010/12/08/how-to-quack-like-a-queryset/. (I looked at Google
again to see if self-joins are possible in Django, and confirmed that they are probably not.
See: http://stackoverflow.com/questions/1578362/self-join-with-django-orm.)
'''
rest_columns = [ 'parameter__id',
'parameter__name',
'parameter__standard_name',
'measurement__depth',
'measurement__geom',
'measurement__instantpoint__timevalue',
'measurement__instantpoint__activity__name',
'measurement__instantpoint__activity__platform__name',
'datavalue',
'parameter__units'
]
kml_columns = [ 'parameter__name',
'parameter__standard_name',
'measurement__depth',
'measurement__geom',
'measurement__instantpoint__timevalue',
'measurement__instantpoint__activity__platform__name',
'datavalue',
]
ui_timedepth_columns = [
'measurement__depth',
'measurement__instantpoint__timevalue',
'measurement__instantpoint__activity__name',
'datavalue',
]
def __init__(self, query, values_list, qs_mp=None):
'''
Initialize MPQuerySet with either raw SQL in @query or a QuerySet in @qs_mp.
Use @values_list to request just the fields (columns) needed. The class variables
        rest_columns and kml_columns are typical value_lists. Note: specifying a values_list
appears to break the correct serialization of geometry types in the json response.
Called by stoqs/views/__init__.py when MeasuredParameter REST requests are made.
'''
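        # Construction is typically one of (illustrative only):
        #   MPQuerySet(raw_sql_string, MPQuerySet.rest_columns)             # wrap a raw SQL query
        #   MPQuerySet(None, MPQuerySet.rest_columns, qs_mp=geo_queryset)   # wrap an existing GeoQuerySet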
self.isRawQuerySet = False
if query is None and qs_mp is not None:
logger.debug('query is None and qs_mp is not None')
self.query = postgresifySQL(str(qs_mp.query))
self.mp_query = qs_mp
elif query is not None and qs_mp is None:
logger.debug('query is not None and qs_mp is None')
self.query = query
self.mp_query = MeasuredParameter.objects.raw(query)
self.isRawQuerySet = True
else:
raise Exception('Either query or qs_mp must be not None and the other be None.')
self.values_list = values_list
self.ordering = ('id',)
def __iter__(self):
'''
        Main way to access data that is used by iterators in templates, etc.
Simulate behavior of regular GeoQuerySets. Modify & format output as needed.
'''
minimal_values_list = False
for item in self.rest_columns:
if item not in self.values_list:
minimal_values_list = True
break
logger.debug('minimal_values_list = %s', minimal_values_list)
logger.debug('self.query = %s', self.query)
logger.debug('type(self.mp_query) = %s', type(self.mp_query))
# Must have model instance objects for JSON serialization of geometry fields to work right
if minimal_values_list:
# Likely for Flot contour plot
try:
# Dictionaries
for mp in self.mp_query[:ITER_HARD_LIMIT]:
row = { 'measurement__depth': mp['measurement__depth'],
'measurement__instantpoint__timevalue': mp['measurement__instantpoint__timevalue'],
'measurement__instantpoint__activity__name': mp['measurement__instantpoint__activity__name'],
'datavalue': mp['datavalue'],
}
yield row
except TypeError:
# Model instances
for mp in self.mp_query[:ITER_HARD_LIMIT]:
row = { 'measurement__depth': mp.measurement.depth,
'measurement__instantpoint__timevalue': mp.measurement.instantpoint.timevalue,
'measurement__instantpoint__activity__name': mp.measurement.instantpoint.activity.name,
'datavalue': mp.datavalue,
}
yield row
else:
# Likely for building a REST or KML response
logger.debug('type(self.mp_query) = %s', type(self.mp_query))
try:
# Dictionaries
for mp in self.mp_query[:ITER_HARD_LIMIT]:
row = {
'measurement__depth': mp['measurement__depth'],
'parameter__id': mp['parameter__id'],
'parameter__name': mp['parameter__name'],
'datavalue': mp['datavalue'],
'measurement__instantpoint__timevalue': mp['measurement__instantpoint__timevalue'],
'parameter__standard_name': mp['parameter__standard_name'],
'measurement__instantpoint__activity__name': mp['measurement__instantpoint__activity__name'],
'measurement__instantpoint__activity__platform__name': mp['measurement__instantpoint__activity__platform__name'],
# If .values(...) are requested in the query string then json serialization of the point geometry does not work right
'measurement__geom': mp['measurement__geom'],
'parameter__units': mp['parameter__units'],
}
yield row
except TypeError:
# Model instances
for mp in self.mp_query[:ITER_HARD_LIMIT]:
row = {
'measurement__depth': mp.measurement.depth,
'parameter__id': mp.parameter__id,
'parameter__name': mp.parameter__name,
'datavalue': mp.datavalue,
'measurement__instantpoint__timevalue': mp.measurement.instantpoint.timevalue,
'parameter__standard_name': mp.parameter.standard_name,
'measurement__instantpoint__activity__name': mp.measurement.instantpoint.activity.name,
'measurement__instantpoint__activity__platform__name': mp.measurement.instantpoint.activity.platform.name,
'measurement__geom': mp.measurement.geom,
'parameter__units': mp.parameter.units,
}
yield row
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __getitem__(self, k):
'''
Boiler plate copied from http://ramenlabs.com/2010/12/08/how-to-quack-like-a-queryset/.
Is used for slicing data, e.g. for subsampling data for sensortracks
'''
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if isinstance(k, slice):
return self.mp_query[k]
def count(self):
logger.debug('Counting records in self.mp_query which is of type = %s', type(self.mp_query))
# self.mp_query should contain no 'ORDER BY' as ensured by the routine that calls .count()
try:
c = self.mp_query.count()
            logger.debug('c = %d as retrieved from self.mp_query.count()', c)
except AttributeError:
try:
c = sum(1 for mp in self.mp_query)
                logger.debug('c = %d as retrieved from sum(1 for mp in self.mp_query)', c)
except DatabaseError:
return 0
return c
def all(self):
return self._clone()
def filter(self, *args, **kwargs):
qs = self._clone()
logger.debug('type(qs) = %s', type(qs))
qs.mp_query = qs.mp_query.filter(*args, **kwargs)
return qs.mp_query
def exclude(self, *args, **kwargs):
qs = self._clone()
qs.mp_query = qs.mp_query.exclude(*args, **kwargs)
return qs.mp_query
def order_by(self, *args, **kwargs):
qs = self._clone()
qs.mp_query = qs.mp_query.order_by(*args, **kwargs)
return qs.mp_query
def _clone(self):
qs = MPQuerySet(self.query, self.values_list)
qs.mp_query = self.mp_query._clone()
return qs
class SPQuerySet(object):
'''
A class to simulate a GeoQuerySet that's suitable for use everywhere a GeoQuerySet may be used.
This special class supports adapting SampledParameter RawQuerySets to make them look like regular
GeoQuerySets. See: http://ramenlabs.com/2010/12/08/how-to-quack-like-a-queryset/. (I looked at Google
again to see if self-joins are possible in Django, and confirmed that they are probably not.
See: http://stackoverflow.com/questions/1578362/self-join-with-django-orm.)
'''
rest_columns = [ 'parameter__id',
'parameter__name',
'parameter__standard_name',
'sample__depth',
'sample__geom',
'sample__instantpoint__timevalue',
'sample__instantpoint__activity__name',
'sample__instantpoint__activity__platform__name',
'sample__instantpoint__activity__startdate',
'sample__instantpoint__activity__enddate',
'sample__instantpoint__activity__mindepth',
'sample__instantpoint__activity__maxdepth',
'datavalue',
'parameter__units'
]
kml_columns = [ 'parameter__name',
'parameter__standard_name',
'sample__depth',
'sample__geom',
'sample__instantpoint__timevalue',
'sample__instantpoint__activity__platform__name',
'datavalue',
]
ui_timedepth_columns = [
'sample__depth',
'sample__instantpoint__timevalue',
'sample__instantpoint__activity__name',
'sample__instantpoint__activity__startdate',
'sample__instantpoint__activity__enddate',
'sample__instantpoint__activity__mindepth',
'sample__instantpoint__activity__maxdepth',
'datavalue',
]
def __init__(self, query, values_list, qs_sp=None):
'''
Initialize SPQuerySet with either raw SQL in @query or a QuerySet in @qs_sp.
Use @values_list to request just the fields (columns) needed. The class variables
        rest_columns and kml_columns are typical value_lists. Note: specifying a values_list
appears to break the correct serialization of geometry types in the json response.
Called by stoqs/views/__init__.py when SampledParameter REST requests are made.
'''
if query is None and qs_sp is not None:
logger.debug('query is None and qs_sp is not None')
self.query = postgresifySQL(str(qs_sp.query))
self.sp_query = qs_sp
elif query is not None and qs_sp is None:
logger.debug('query is not None and qs_sp is None')
self.query = query
self.sp_query = SampledParameter.objects.raw(query)
else:
raise Exception('Either query or qs_sp must be not None and the other be None.')
self.values_list = values_list
self.ordering = ('id',)
def __iter__(self):
'''
        Main way to access data that is used by iterators in templates, etc.
Simulate behavior of regular GeoQuerySets. Modify & format output as needed.
'''
minimal_values_list = False
for item in self.rest_columns:
if item not in self.values_list:
minimal_values_list = True
break
logger.debug('minimal_values_list = %s', minimal_values_list)
logger.debug('self.query = %s', self.query)
logger.debug('type(self.sp_query) = %s', type(self.sp_query))
if isinstance(self.sp_query, ValuesQuerySet):
logger.debug('self.sp_query is ValuesQuerySet')
if isinstance(self.sp_query, GeoQuerySet):
logger.debug('self.sp_query is GeoQuerySet')
if isinstance(self.sp_query, RawQuerySet):
logger.debug('self.sp_query is RawQuerySet')
# Must have model instance objects for JSON serialization of geometry fields to work right
if minimal_values_list:
# Likely for Flot contour plot
try:
# Dictionaries
for mp in self.sp_query[:ITER_HARD_LIMIT]:
row = { 'sample__depth': mp['sample__depth'],
'sample__instantpoint__timevalue': mp['sample__instantpoint__timevalue'],
'sample__instantpoint__activity__name': mp['sample__instantpoint__activity__name'],
'datavalue': mp['datavalue'],
}
yield row
except TypeError:
# Model instances
for mp in self.sp_query[:ITER_HARD_LIMIT]:
row = { 'sample__depth': mp.sample.depth,
'sample__instantpoint__timevalue': mp.sample.instantpoint.timevalue,
'sample__instantpoint__activity__name': mp.sample.instantpoint.activity.name,
'datavalue': mp.datavalue,
}
yield row
else:
# Likely for building a REST or KML response
logger.debug('type(self.sp_query) = %s', type(self.sp_query))
try:
# Dictionaries
for mp in self.sp_query[:ITER_HARD_LIMIT]:
row = {
'sample__depth': mp['sample__depth'],
'parameter__id': mp['parameter__id'],
'parameter__name': mp['parameter__name'],
'datavalue': mp['datavalue'],
'sample__instantpoint__timevalue': mp['sample__instantpoint__timevalue'],
'parameter__standard_name': mp['parameter__standard_name'],
'sample__instantpoint__activity__name': mp['sample__instantpoint__activity__name'],
'sample__instantpoint__activity__platform__name': mp['sample__instantpoint__activity__platform__name'],
# If .values(...) are requested in the query string then json serialization of the point geometry does not work right
'sample__geom': mp['sample__geom'],
'parameter__units': mp['parameter__units'],
}
yield row
except TypeError:
# Model instances
for mp in self.sp_query[:ITER_HARD_LIMIT]:
row = {
'sample__depth': mp.sample.depth,
'parameter__id': mp.parameter__id,
'parameter__name': mp.parameter__name,
'datavalue': mp.datavalue,
'sample__instantpoint__timevalue': mp.sample.instantpoint.timevalue,
'parameter__standard_name': mp.parameter.standard_name,
'sample__instantpoint__activity__name': mp.sample.instantpoint.activity.name,
'sample__instantpoint__activity__platform__name': mp.sample.instantpoint.activity.platform.name,
'sample__geom': mp.sample.geom,
'parameter__units': mp.parameter.units,
}
yield row
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __getitem__(self, k):
'''
Boiler plate copied from http://ramenlabs.com/2010/12/08/how-to-quack-like-a-queryset/.
Is used for slicing data, e.g. for subsampling data for sensortracks
'''
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if isinstance(k, slice):
return self.sp_query[k]
def count(self):
logger.debug('Counting records in self.sp_query which is of type = %s', type(self.sp_query))
# self.sp_query should contain no 'ORDER BY' as ensured by the routine that calls .count()
try:
c = self.sp_query.count()
            logger.debug('c = %d as retrieved from self.sp_query.count()', c)
except AttributeError:
try:
c = sum(1 for mp in self.sp_query)
                logger.debug('c = %d as retrieved from sum(1 for mp in self.sp_query)', c)
except DatabaseError:
return 0
return c
def all(self):
return self._clone()
def filter(self, *args, **kwargs):
qs = self._clone()
logger.debug('type(qs) = %s', type(qs))
qs.sp_query = qs.sp_query.filter(*args, **kwargs)
return qs.sp_query
def exclude(self, *args, **kwargs):
qs = self._clone()
qs.sp_query = qs.sp_query.exclude(*args, **kwargs)
return qs.sp_query
def order_by(self, *args, **kwargs):
qs = self._clone()
qs.sp_query = qs.sp_query.order_by(*args, **kwargs)
return qs.sp_query
def _clone(self):
qs = SPQuerySet(self.query, self.values_list)
qs.sp_query = self.sp_query._clone()
return qs
class MPQuery(object):
'''
This class is designed to handle building and managing queries against the MeasuredParameter table of the STOQS database.
Special tooling is needed to perform parameter value queries which require building raw sql statements in order to
execute the self joins needed on the measuredparameter table. The structure of RawQuerySet returned is harmonized
with the normal GeoQuerySet returned through regular .filter() operations by using the MPQuerySet "adapter".
'''
rest_select_items = '''stoqs_parameter.id as parameter__id,
stoqs_parameter.name as parameter__name,
stoqs_parameter.standard_name as parameter__standard_name,
stoqs_measurement.depth as measurement__depth,
stoqs_measurement.geom as measurement__geom,
stoqs_instantpoint.timevalue as measurement__instantpoint__timevalue,
stoqs_platform.name as measurement__instantpoint__activity__platform__name,
stoqs_measuredparameter.datavalue as datavalue,
stoqs_parameter.units as parameter__units'''
sampled_rest_select_items = '''stoqs_parameter.id as parameter__id,
stoqs_parameter.name as parameter__name,
stoqs_parameter.standard_name as parameter__standard_name,
stoqs_sample.depth as sample__depth,
stoqs_sample.geom as sample__geom,
stoqs_instantpoint.timevalue as sample__instantpoint__timevalue,
stoqs_platform.name as sample__instantpoint__activity__platform__name,
stoqs_sampledparameter.datavalue as datavalue,
stoqs_parameter.units as parameter__units'''
kml_select_items = ''
contour_select_items = ''
def __init__(self, request):
'''
This object saves instances of the QuerySet and count so that get_() methods work like a singleton to
return the value for the object. MPQuery objects are meant to be instantiated by the STOQSQManager
buildQuerySet() method and are unique for each AJAX request. After buildMPQuerySet() is executed
the member values below can be accessed.
'''
self.request = request
self.qs_mp = None
self.qs_mp_no_order = None
self.qs_sp = None
self.qs_sp_no_order = None
self.sql = None
self._count = None
self._MProws = []
self.parameterID = None
self.initialQuery = False
def buildMPQuerySet(self, *args, **kwargs):
'''
Build the query set based on selections from the UI. For the first time through kwargs will be empty
and self.qs_mp will have no constraints and will be all of the MeasuredParameters in the database.
        This is called by utils/STOQSQManager.py.
'''
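        # Parameters in the SAMPLED ParameterGroup are routed to SampledParameter query sets;
        # all other Parameters are treated as MEASUREDINSITU and use MeasuredParameter query sets.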
if self.qs_mp is None:
parameterGroups = [MEASUREDINSITU]
self.kwargs = kwargs
if 'parameterplot' in self.kwargs:
if self.kwargs['parameterplot'][0]:
self.parameterID = self.kwargs['parameterplot'][0]
logger.debug('self.parameterID = %s', self.parameterID)
parameterGroups = getParameterGroups(self.request.META['dbAlias'], Parameter.objects.get(id=self.parameterID))
if SAMPLED in parameterGroups:
self.qs_sp = self.getSampledParametersQS()
if self.kwargs['showparameterplatformdata']:
logger.debug('Building qs_sp_no_order with values_list = %s', SPQuerySet.ui_timedepth_columns)
self.qs_sp_no_order = self.getSampledParametersQS(SPQuerySet.ui_timedepth_columns, orderedFlag=False)
else:
self.qs_sp_no_order = self.getSampledParametersQS(orderedFlag=False)
self.sql_sp = self.getSampledParametersPostgreSQL()
else:
# The default is to consider the Parameter MEASUREDINSITU if it's not SAMPLED
self.qs_mp = self.getMeasuredParametersQS()
if self.kwargs['showparameterplatformdata']:
logger.debug('Building qs_mp_no_order with values_list = %s', SPQuerySet.ui_timedepth_columns)
self.qs_mp_no_order = self.getMeasuredParametersQS(MPQuerySet.ui_timedepth_columns, orderedFlag=False)
else:
self.qs_mp_no_order = self.getMeasuredParametersQS(orderedFlag=False)
self.sql = self.getMeasuredParametersPostgreSQL()
def _getQueryParms(self, group=MEASUREDINSITU):
'''
Extract constraints from the querystring kwargs to construct a dictionary of query parameters
that can be used as a filter for MeasuredParameters. Handles all constraints except parameter
value constraints.
'''
qparams = {}
##logger.debug('self.kwargs = %s', pprint.pformat(self.kwargs))
logger.debug('group = %s', group)
if group == SAMPLED:
if 'sampledparametersgroup' in self.kwargs:
if self.kwargs['sampledparametersgroup']:
qparams['parameter__id__in'] = self.kwargs['sampledparametersgroup']
if 'parameterstandardname' in self.kwargs:
if self.kwargs['parameterstandardname']:
qparams['parameter__standard_name__in'] = self.kwargs['parameterstandardname']
if 'platforms' in self.kwargs:
if self.kwargs['platforms']:
qparams['sample__instantpoint__activity__platform__name__in'] = self.kwargs['platforms']
if 'time' in self.kwargs:
if self.kwargs['time'][0] is not None:
qparams['sample__instantpoint__timevalue__gte'] = self.kwargs['time'][0]
if self.kwargs['time'][1] is not None:
qparams['sample__instantpoint__timevalue__lte'] = self.kwargs['time'][1]
if 'depth' in self.kwargs:
if self.kwargs['depth'][0] is not None:
qparams['sample__depth__gte'] = self.kwargs['depth'][0]
if self.kwargs['depth'][1] is not None:
qparams['sample__depth__lte'] = self.kwargs['depth'][1]
if getGet_Actual_Count(self.kwargs):
# Make sure that we have at least time so that the instantpoint table is included
if not 'sample__instantpoint__timevalue__gte' in qparams:
qparams['sample__instantpoint__pk__isnull'] = False
else:
if self.kwargs.has_key('measuredparametersgroup'):
if self.kwargs['measuredparametersgroup']:
qparams['parameter__name__in'] = self.kwargs['measuredparametersgroup']
if self.kwargs.has_key('parameterstandardname'):
if self.kwargs['parameterstandardname']:
qparams['parameter__standard_name__in'] = self.kwargs['parameterstandardname']
if self.kwargs.has_key('platforms'):
if self.kwargs['platforms']:
qparams['measurement__instantpoint__activity__platform__name__in'] = self.kwargs['platforms']
if self.kwargs.has_key('time'):
if self.kwargs['time'][0] is not None:
qparams['measurement__instantpoint__timevalue__gte'] = self.kwargs['time'][0]
if self.kwargs['time'][1] is not None:
qparams['measurement__instantpoint__timevalue__lte'] = self.kwargs['time'][1]
if self.kwargs.has_key('depth'):
if self.kwargs['depth'][0] is not None:
qparams['measurement__depth__gte'] = self.kwargs['depth'][0]
if self.kwargs['depth'][1] is not None:
qparams['measurement__depth__lte'] = self.kwargs['depth'][1]
if 'mplabels' in self.kwargs:
if self.kwargs['mplabels' ]:
qparams['measurement__id__in'] = MeasuredParameterResource.objects.using(self.request.META['dbAlias']).filter(
resource__id__in=self.kwargs['mplabels' ]).values_list('measuredparameter__measurement__id', flat=True)
if getGet_Actual_Count(self.kwargs):
# Make sure that we have at least time so that the instantpoint table is included
if not qparams.has_key('measurement__instantpoint__timevalue__gte'):
qparams['measurement__instantpoint__pk__isnull'] = False
logger.debug('qparams = %s', pprint.pformat(qparams))
return qparams
def getMeasuredParametersQS(self, values_list=[], orderedFlag=True):
'''
        Return query set of MeasuredParameters given the current constraints. If no parameter is selected return None.
        @values_list can be assigned with additional columns that are supported by MPQuerySet(). Note that specification
of a values_list will break the JSON serialization of geometry types. @orderedFlag may be set to False to reduce
memory and time taken for queries that don't need ordered values. If parameterID is not none then that parameter
is added to the filter - used for parameterPlatformPNG generation.
'''
qparams = self._getQueryParms()
logger.debug('Building qs_mp...')
if values_list == []:
            # If no .values(...) added to QS then items returned by iteration on qs_mp are model objects, not our wanted dictionaries
logger.debug('... with values_list = []; using default rest_columns')
qs_mp = MeasuredParameter.objects.using(self.request.META['dbAlias']).filter(**qparams).values(*MPQuerySet.rest_columns)
else:
logger.debug('... with values_list = %s', values_list)
qs_mp = MeasuredParameter.objects.using(self.request.META['dbAlias']).select_related(depth=2).filter(**qparams).values(*values_list)
if self.parameterID:
logger.debug('Adding parameter__id=%d filter to qs_mp', int(self.parameterID))
qs_mp = qs_mp.filter(parameter__id=int(self.parameterID))
if orderedFlag:
qs_mp = qs_mp.order_by('measurement__instantpoint__activity__name', 'measurement__instantpoint__timevalue')
# Wrap MPQuerySet around either RawQuerySet or GeoQuerySet to control the __iter__() items for lat/lon etc.
        if 'parametervalues' in self.kwargs:
if self.kwargs['parametervalues']:
# A depth of 4 is needed in order to see Platform
qs_mp = MeasuredParameter.objects.using(self.request.META['dbAlias']).select_related(depth=4).filter(**qparams)
if self.parameterID:
logger.debug('Adding parameter__id=%d filter to qs_mp', int(self.parameterID))
qs_mp = qs_mp.filter(parameter__id=int(self.parameterID))
if orderedFlag:
qs_mp = qs_mp.order_by('measurement__instantpoint__activity__name', 'measurement__instantpoint__timevalue')
sql = postgresifySQL(str(qs_mp.query))
logger.debug('\n\nsql before query = %s\n\n', sql)
pq = PQuery(self.request)
sql = pq.addParameterValuesSelfJoins(sql, self.kwargs['parametervalues'], select_items=self.rest_select_items)
logger.debug('\n\nsql after parametervalue query = %s\n\n', sql)
qs_mpq = MPQuerySet(sql, values_list)
else:
logger.debug('Building MPQuerySet with qs_mpquery = %s', str(qs_mp.query))
qs_mpq = MPQuerySet(None, values_list, qs_mp=qs_mp)
else:
logger.debug('Building MPQuerySet with qs_mpquery = %s', str(qs_mp.query))
qs_mpq = MPQuerySet(None, values_list, qs_mp=qs_mp)
        if qs_mpq is not None:
logger.debug('qs_mpq.query = %s', str(qs_mpq.query))
else:
logger.debug("No queryset returned for qparams = %s", pprint.pformat(qparams))
return qs_mpq
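    # Hedged usage sketch (assumed caller code, not from this module): requesting
    # only a couple of columns and skipping ordering keeps the query cheap.  'qm'
    # is a hypothetical, already-constructed query manager and the column names
    # are assumptions; note the docstring caveat that a custom values_list breaks
    # JSON serialization of geometry types.
    #
    #   qs = qm.getMeasuredParametersQS(values_list=['parameter__name', 'datavalue'],
    #                                   orderedFlag=False)
    #   for row in qs:
    #       print row['parameter__name'], row['datavalue']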
def getSampledParametersQS(self, values_list=[], orderedFlag=True):
'''
        Return a query set of SampledParameters given the current constraints. If no parameter is selected return None.
        @values_list can be assigned additional columns that are supported by SPQuerySet(). Note that specification
        of a values_list will break the JSON serialization of geometry types. @orderedFlag may be set to False to reduce
        memory and time taken for queries that don't need ordered values. If parameterID is not None then that parameter
        is added to the filter - used for parameterPlatformPNG generation.
'''
qparams = self._getQueryParms(group=SAMPLED)
logger.debug('Building qs_sp...')
if values_list == []:
            # If no .values(...) is added to the QS then items returned by iteration on qs_sp are model objects, not the dictionaries we want
values_list = SPQuerySet.rest_columns
qs_sp = SampledParameter.objects.using(self.request.META['dbAlias']).filter(**qparams).values(*values_list)
else:
qs_sp = SampledParameter.objects.using(self.request.META['dbAlias']).select_related(depth=2).filter(**qparams).values(*values_list)
if self.parameterID:
logger.debug('Adding parameter__id=%d filter to qs_sp', int(self.parameterID))
qs_sp = qs_sp.filter(parameter__id=int(self.parameterID))
if orderedFlag:
qs_sp = qs_sp.order_by('sample__instantpoint__activity__name', 'sample__instantpoint__timevalue')
# Wrap SPQuerySet around either RawQuerySet or GeoQuerySet to control the __iter__() items for lat/lon etc.
        if 'parametervalues' in self.kwargs:
if self.kwargs['parametervalues']:
# A depth of 4 is needed in order to see Platform
qs_sp = SampledParameter.objects.using(self.request.META['dbAlias']).select_related(depth=4).filter(**qparams)
if orderedFlag:
qs_sp = qs_sp.order_by('sample__instantpoint__activity__name', 'sample__instantpoint__timevalue')
sql = postgresifySQL(str(qs_sp.query))
logger.debug('\n\nsql before query = %s\n\n', sql)
pq = PQuery(self.request)
sql = pq.addParameterValuesSelfJoins(sql, self.kwargs['parametervalues'], select_items=self.sampled_rest_select_items)
logger.debug('\n\nsql after parametervalue query = %s\n\n', sql)
qs_spq = SPQuerySet(sql, values_list)
else:
logger.debug('Building SPQuerySet for SampledParameter...')
qs_spq = SPQuerySet(None, values_list, qs_sp=qs_sp)
else:
logger.debug('Building SPQuerySet for SampledParameter...')
qs_spq = SPQuerySet(None, values_list, qs_sp=qs_sp)
        if qs_spq is not None:
logger.debug('qs_spq.query = %s', str(qs_spq.query))
else:
logger.debug("No queryset returned for qparams = %s", pprint.pformat(qparams))
return qs_spq
def getMPCount(self):
'''
        Get the actual count of measured parameters given the existing query. If the private _count
        member variable exists return that, otherwise expand the query set as necessary to get and
        return the count.
'''
if not self._count:
logger.debug('self._count does not exist, getting count...')
if self.initialQuery:
logger.debug('... getting initialCount from simple QuerySet')
self._count = MeasuredParameter.objects.using(self.request.META['dbAlias']).count()
else:
try:
self._count = self.qs_mp_no_order.count()
except AttributeError, e:
raise Exception('Could not get Measured Parameter count: %s' % e)
logger.debug('self._count = %d', self._count)
return int(self._count)
def getLocalizedMPCount(self):
'''
Apply commas to the count number and return as a string
'''
locale.setlocale(locale.LC_ALL, 'en_US')
return locale.format("%d", self.getMPCount(), grouping=True)
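    # Minimal example of the grouping behaviour used above (assumes the en_US
    # locale is available on the host; not part of the original code):
    #
    #   locale.setlocale(locale.LC_ALL, 'en_US')
    #   locale.format("%d", 1234567, grouping=True)    # -> '1,234,567'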
def getMeasuredParametersPostgreSQL(self):
'''
Return SQL string that can be executed against the postgres database
'''
sql = 'Check "Get actual count" checkbox to see the SQL for your data selection'
if not self._count:
logger.debug('Calling self.getMPCount()...')
self._count = self.getMPCount()
if self._count:
self.qs_mp = self.getMeasuredParametersQS(MPQuerySet.rest_columns)
if self.qs_mp:
logger.debug('type(self.qs_mp) = %s', type(self.qs_mp))
sql = str(self.qs_mp.query)
sql = sqlparse.format(sql, reindent=True, keyword_case='upper')
# Fix up the formatting
sql = sql.replace('INNER JOIN', ' INNER JOIN')
sql = sql.replace(' WHERE', '\nWHERE ')
                p = re.compile(r'\s+AND')
sql = p.sub('\n AND', sql)
logger.debug('sql = %s', sql)
return sql
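    # Small sketch of the sqlparse call used above (sqlparse.format() is the real
    # library API; the query string is made up and the output shown is approximate):
    #
    #   import sqlparse
    #   sql = "select id, datavalue from stoqs_measuredparameter where datavalue > 5"
    #   print sqlparse.format(sql, reindent=True, keyword_case='upper')
    #   # SELECT id,
    #   #        datavalue
    #   # FROM stoqs_measuredparameter
    #   # WHERE datavalue > 5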
def getSampledParametersPostgreSQL(self):
'''
Return SQL string that can be executed against the postgres database
'''
self.qs_sp = self.getSampledParametersQS(SPQuerySet.rest_columns)
if self.qs_sp:
logger.debug('type(self.qs_sp) = %s', type(self.qs_sp))
sql = str(self.qs_sp.query)
sql = sqlparse.format(sql, reindent=True, keyword_case='upper')
# Fix up the formatting
sql = sql.replace('INNER JOIN', ' INNER JOIN')
sql = sql.replace(' WHERE', '\nWHERE ')
            p = re.compile(r'\s+AND')
sql = p.sub('\n AND', sql)
logger.debug('sql = %s', sql)
return sql
| gpl-3.0 | -7,650,626,417,622,582,000 | 48.91688 | 145 | 0.575509 | false |
hojel/calibre | src/calibre/gui2/device.py | 2 | 88265 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
# Imports {{{
import os, traceback, Queue, time, cStringIO, re, sys, weakref
from threading import Thread, Event
from PyQt5.Qt import (
QMenu, QAction, QActionGroup, QIcon, Qt, pyqtSignal, QDialog,
QObject, QVBoxLayout, QDialogButtonBox, QCursor, QCoreApplication,
QApplication, QEventLoop)
from calibre.customize.ui import (available_input_formats, available_output_formats,
device_plugins, disabled_device_plugins)
from calibre.devices.interface import DevicePlugin, currently_connected_device
from calibre.devices.errors import (UserFeedback, OpenFeedback, OpenFailed,
InitialConnectionError)
from calibre.gui2.dialogs.choose_format_device import ChooseFormatDeviceDialog
from calibre.utils.ipc.job import BaseJob
from calibre.devices.scanner import DeviceScanner
from calibre.gui2 import (config, error_dialog, Dispatcher, dynamic,
warning_dialog, info_dialog, choose_dir, FunctionDispatcher,
show_restart_warning, gprefs, question_dialog)
from calibre.ebooks.metadata import authors_to_string
from calibre import preferred_encoding, prints, force_unicode, as_unicode, sanitize_file_name2
from calibre.utils.filenames import ascii_filename
from calibre.devices.errors import (FreeSpaceError, WrongDestinationError,
BlacklistedDevice)
from calibre.devices.apple.driver import ITUNES_ASYNC
from calibre.devices.folder_device.driver import FOLDER_DEVICE
from calibre.devices.bambook.driver import BAMBOOK, BAMBOOKWifi
from calibre.constants import DEBUG
from calibre.utils.config import tweaks, device_prefs
from calibre.utils.magick.draw import thumbnail
from calibre.library.save_to_disk import find_plugboard
from calibre.ptempfile import PersistentTemporaryFile, force_unicode as filename_to_unicode
# }}}
class DeviceJob(BaseJob): # {{{
def __init__(self, func, done, job_manager, args=[], kwargs={},
description=''):
BaseJob.__init__(self, description)
self.func = func
self.callback_on_done = done
if not isinstance(self.callback_on_done, (Dispatcher,
FunctionDispatcher)):
self.callback_on_done = FunctionDispatcher(self.callback_on_done)
self.args, self.kwargs = args, kwargs
self.exception = None
self.job_manager = job_manager
self._details = _('No details available.')
self._aborted = False
def start_work(self):
if DEBUG:
prints('Job:', self.id, self.description, 'started',
safe_encode=True)
self.start_time = time.time()
self.job_manager.changed_queue.put(self)
def job_done(self):
self.duration = time.time() - self.start_time
self.percent = 1
if DEBUG:
prints('DeviceJob:', self.id, self.description,
'done, calling callback', safe_encode=True)
try:
self.callback_on_done(self)
except:
pass
if DEBUG:
prints('DeviceJob:', self.id, self.description,
'callback returned', safe_encode=True)
self.job_manager.changed_queue.put(self)
def report_progress(self, percent, msg=''):
self.notifications.put((percent, msg))
self.job_manager.changed_queue.put(self)
def run(self):
self.start_work()
try:
self.result = self.func(*self.args, **self.kwargs)
if self._aborted:
return
except (Exception, SystemExit) as err:
if self._aborted:
return
self.failed = True
ex = as_unicode(err)
self._details = ex + '\n\n' + \
force_unicode(traceback.format_exc())
self.exception = err
finally:
self.job_done()
def abort(self, err):
call_job_done = False
if self.run_state == self.WAITING:
self.start_work()
call_job_done = True
self._aborted = True
self.failed = True
self._details = unicode(err)
self.exception = err
if call_job_done:
self.job_done()
@property
def log_file(self):
return cStringIO.StringIO(self._details.encode('utf-8'))
# }}}
def device_name_for_plugboards(device_class):
if hasattr(device_class, 'DEVICE_PLUGBOARD_NAME'):
return device_class.DEVICE_PLUGBOARD_NAME
return device_class.__class__.__name__
class BusyCursor(object):
def __enter__(self):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
def __exit__(self, *args):
QApplication.restoreOverrideCursor()
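# BusyCursor is used further down (sync_catalogs(), sync_news(), sync_to_device())
# as a plain context manager, e.g.:
#
#   with BusyCursor():
#       do_expensive_format_lookup()   # placeholder for the real work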
class DeviceManager(Thread): # {{{
def __init__(self, connected_slot, job_manager, open_feedback_slot,
open_feedback_msg, allow_connect_slot,
after_callback_feedback_slot, sleep_time=2):
'''
:sleep_time: Time to sleep between device probes in secs
'''
Thread.__init__(self)
self.setDaemon(True)
# [Device driver, Showing in GUI, Ejected]
self.devices = list(device_plugins())
self.disabled_device_plugins = list(disabled_device_plugins())
self.managed_devices = [x for x in self.devices if
not x.MANAGES_DEVICE_PRESENCE]
self.unmanaged_devices = [x for x in self.devices if
x.MANAGES_DEVICE_PRESENCE]
self.sleep_time = sleep_time
self.connected_slot = connected_slot
self.allow_connect_slot = allow_connect_slot
self.jobs = Queue.Queue(0)
self.job_steps = Queue.Queue(0)
self.keep_going = True
self.job_manager = job_manager
self.reported_errors = set([])
self.current_job = None
self.scanner = DeviceScanner()
self.connected_device = None
self.connected_device_kind = None
self.ejected_devices = set([])
self.mount_connection_requests = Queue.Queue(0)
self.open_feedback_slot = open_feedback_slot
self.after_callback_feedback_slot = after_callback_feedback_slot
self.open_feedback_msg = open_feedback_msg
self._device_information = None
self.current_library_uuid = None
self.call_shutdown_on_disconnect = False
self.devices_initialized = Event()
self.dynamic_plugins = {}
def report_progress(self, *args):
pass
@property
def is_device_connected(self):
return self.connected_device is not None
@property
def is_device_present(self):
return self.connected_device is not None and self.connected_device not in self.ejected_devices
@property
def device(self):
return self.connected_device
def do_connect(self, connected_devices, device_kind):
for dev, detected_device in connected_devices:
if dev.OPEN_FEEDBACK_MESSAGE is not None:
self.open_feedback_slot(dev.OPEN_FEEDBACK_MESSAGE)
try:
dev.reset(detected_device=detected_device,
report_progress=self.report_progress)
dev.open(detected_device, self.current_library_uuid)
except OpenFeedback as e:
if dev not in self.ejected_devices:
self.open_feedback_msg(dev.get_gui_name(), e)
self.ejected_devices.add(dev)
continue
except OpenFailed:
raise
except:
tb = traceback.format_exc()
if DEBUG or tb not in self.reported_errors:
self.reported_errors.add(tb)
prints('Unable to open device', str(dev))
prints(tb)
continue
self.after_device_connect(dev, device_kind)
return True
return False
def after_device_connect(self, dev, device_kind):
allow_connect = True
try:
uid = dev.get_device_uid()
except NotImplementedError:
uid = None
asked = gprefs.get('ask_to_manage_device', [])
if (dev.ASK_TO_ALLOW_CONNECT and uid and uid not in asked):
if not self.allow_connect_slot(dev.get_gui_name(), dev.icon):
allow_connect = False
asked.append(uid)
gprefs.set('ask_to_manage_device', asked)
if not allow_connect:
dev.ignore_connected_device(uid)
return
self.connected_device = currently_connected_device._device = dev
self.connected_device.specialize_global_preferences(device_prefs)
self.connected_device_kind = device_kind
self.connected_slot(True, device_kind)
def connected_device_removed(self):
while True:
try:
job = self.jobs.get_nowait()
job.abort(Exception(_('Device no longer connected.')))
except Queue.Empty:
break
try:
self.connected_device.post_yank_cleanup()
except:
pass
if self.connected_device in self.ejected_devices:
self.ejected_devices.remove(self.connected_device)
else:
self.connected_slot(False, self.connected_device_kind)
if self.call_shutdown_on_disconnect:
# The current device is an instance of a plugin class instantiated
# to handle this connection, probably as a mounted device. We are
# now abandoning the instance that we created, so we tell it that it
# is being shut down.
self.connected_device.shutdown()
self.call_shutdown_on_disconnect = False
device_prefs.set_overrides()
self.connected_device = currently_connected_device._device = None
self._device_information = None
def detect_device(self):
self.scanner.scan()
if self.is_device_connected:
if self.connected_device.MANAGES_DEVICE_PRESENCE:
cd = self.connected_device.detect_managed_devices(self.scanner.devices)
if cd is None:
self.connected_device_removed()
else:
connected, detected_device = \
self.scanner.is_device_connected(self.connected_device,
only_presence=True)
if not connected:
if DEBUG:
# Allow the device subsystem to output debugging info about
# why it thinks the device is not connected. Used, for e.g.
# in the can_handle() method of the T1 driver
self.scanner.is_device_connected(self.connected_device,
only_presence=True, debug=True)
self.connected_device_removed()
else:
for dev in self.unmanaged_devices:
try:
cd = dev.detect_managed_devices(self.scanner.devices)
except:
prints('Error during device detection for %s:'%dev)
traceback.print_exc()
else:
if cd is not None:
try:
dev.open(cd, self.current_library_uuid)
except BlacklistedDevice as e:
prints('Ignoring blacklisted device: %s'%
as_unicode(e))
except:
prints('Error while trying to open %s (Driver: %s)'%
(cd, dev))
traceback.print_exc()
else:
self.after_device_connect(dev, 'unmanaged-device')
return
try:
possibly_connected_devices = []
for device in self.managed_devices:
if device in self.ejected_devices:
continue
try:
possibly_connected, detected_device = \
self.scanner.is_device_connected(device)
except InitialConnectionError as e:
self.open_feedback_msg(device.get_gui_name(), e)
continue
if possibly_connected:
possibly_connected_devices.append((device, detected_device))
if possibly_connected_devices:
if not self.do_connect(possibly_connected_devices,
device_kind='device'):
if DEBUG:
prints('Connect to device failed, retrying in 5 seconds...')
time.sleep(5)
if not self.do_connect(possibly_connected_devices,
device_kind='device'):
if DEBUG:
prints('Device connect failed again, giving up')
except OpenFailed as e:
if e.show_me:
traceback.print_exc()
# Mount devices that don't use USB, such as the folder device and iTunes
# This will be called on the GUI thread. Because of this, we must store
# information that the scanner thread will use to do the real work.
def mount_device(self, kls, kind, path):
self.mount_connection_requests.put((kls, kind, path))
# disconnect a device
def umount_device(self, *args):
if self.is_device_connected and not self.job_manager.has_device_jobs():
if self.connected_device_kind in {'unmanaged-device', 'device'}:
self.connected_device.eject()
if self.connected_device_kind != 'unmanaged-device':
self.ejected_devices.add(self.connected_device)
self.connected_slot(False, self.connected_device_kind)
elif hasattr(self.connected_device, 'unmount_device'):
# As we are on the wrong thread, this call must *not* do
# anything besides set a flag that the right thread will see.
self.connected_device.unmount_device()
def next(self):
if not self.job_steps.empty():
try:
return self.job_steps.get_nowait()
except Queue.Empty:
pass
if not self.jobs.empty():
try:
return self.jobs.get_nowait()
except Queue.Empty:
pass
def run_startup(self, dev):
name = 'unknown'
try:
name = dev.__class__.__name__
dev.startup()
except:
prints('Startup method for device %s threw exception'%name)
traceback.print_exc()
def run(self):
# Do any device-specific startup processing.
for d in self.devices:
self.run_startup(d)
n = d.is_dynamically_controllable()
if n:
self.dynamic_plugins[n] = d
self.devices_initialized.set()
while self.keep_going:
kls = None
while True:
try:
(kls,device_kind, folder_path) = \
self.mount_connection_requests.get_nowait()
except Queue.Empty:
break
if kls is not None:
try:
dev = kls(folder_path)
# We just created a new device instance. Call its startup
# method and set the flag to call the shutdown method when
# it disconnects.
self.run_startup(dev)
self.call_shutdown_on_disconnect = True
self.do_connect([[dev, None],], device_kind=device_kind)
except:
prints('Unable to open %s as device (%s)'%(device_kind, folder_path))
traceback.print_exc()
else:
self.detect_device()
do_sleep = True
while True:
job = self.next()
if job is not None:
do_sleep = False
self.current_job = job
if self.device is not None:
self.device.set_progress_reporter(job.report_progress)
self.current_job.run()
self.current_job = None
feedback = getattr(self.device, 'user_feedback_after_callback', None)
if feedback is not None:
self.device.user_feedback_after_callback = None
self.after_callback_feedback_slot(feedback)
else:
break
if do_sleep:
time.sleep(self.sleep_time)
# We are exiting. Call the shutdown method for each plugin
for p in self.devices:
try:
p.shutdown()
except:
pass
def create_job_step(self, func, done, description, to_job, args=[], kwargs={}):
job = DeviceJob(func, done, self.job_manager,
args=args, kwargs=kwargs, description=description)
self.job_manager.add_job(job)
if (done is None or isinstance(done, FunctionDispatcher)) and \
(to_job is not None and to_job == self.current_job):
self.job_steps.put(job)
else:
self.jobs.put(job)
return job
def create_job(self, func, done, description, args=[], kwargs={}):
return self.create_job_step(func, done, description, None, args, kwargs)
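    # Hedged sketch (not upstream code) of how GUI-side callers queue work on the
    # device thread; the '_done' callback name is made up, but the shape mirrors
    # the books()/delete_books() wrappers below.  DeviceJob wraps plain callables
    # in a FunctionDispatcher automatically, so either form works:
    #
    #   def _done(job):
    #       if job.failed:
    #           prints('device job failed:', job.details)
    #
    #   job = device_manager.create_job(device_manager._books,
    #                   FunctionDispatcher(_done),
    #                   description=_('Get list of books on device'))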
def has_card(self):
try:
return bool(self.device.card_prefix())
except:
return False
def _debug_detection(self):
from calibre.devices import debug
raw = debug(plugins=self.devices,
disabled_plugins=self.disabled_device_plugins)
return raw
def debug_detection(self, done):
if self.is_device_connected:
raise ValueError('Device is currently detected in calibre, cannot'
' debug device detection')
self.create_job(self._debug_detection, done,
_('Debug device detection'))
def _get_device_information(self):
info = self.device.get_device_information(end_session=False)
if len(info) < 5:
info = tuple(list(info) + [{}])
info = [i.replace('\x00', '').replace('\x01', '') if isinstance(i, basestring) else i
for i in info]
cp = self.device.card_prefix(end_session=False)
fs = self.device.free_space()
self._device_information = {'info': info, 'prefixes': cp, 'freespace': fs}
return info, cp, fs
def get_device_information(self, done, add_as_step_to_job=None):
'''Get device information and free space on device'''
return self.create_job_step(self._get_device_information, done,
description=_('Get device information'), to_job=add_as_step_to_job)
def _set_library_information(self, library_name, library_uuid, field_metadata):
'''Give the device the current library information'''
self.device.set_library_info(library_name, library_uuid, field_metadata)
def set_library_information(self, done, library_name, library_uuid,
field_metadata, add_as_step_to_job=None):
'''Give the device the current library information'''
return self.create_job_step(self._set_library_information, done,
args=[library_name, library_uuid, field_metadata],
description=_('Set library information'), to_job=add_as_step_to_job)
def slow_driveinfo(self):
''' Update the stored device information with the driveinfo if the
device indicates that getting driveinfo is slow '''
info = self._device_information['info']
if (not info[4] and self.device.SLOW_DRIVEINFO):
info = list(info)
info[4] = self.device.get_driveinfo()
self._device_information['info'] = tuple(info)
def get_current_device_information(self):
return self._device_information
def _books(self):
'''Get metadata from device'''
mainlist = self.device.books(oncard=None, end_session=False)
cardalist = self.device.books(oncard='carda')
cardblist = self.device.books(oncard='cardb')
return (mainlist, cardalist, cardblist)
def books(self, done, add_as_step_to_job=None):
'''Return callable that returns the list of books on device as two booklists'''
return self.create_job_step(self._books, done,
description=_('Get list of books on device'), to_job=add_as_step_to_job)
def _prepare_addable_books(self, paths):
return self.device.prepare_addable_books(paths)
def prepare_addable_books(self, done, paths, add_as_step_to_job=None):
return self.create_job_step(self._prepare_addable_books, done, args=[paths],
description=_('Prepare files for transfer from device'),
to_job=add_as_step_to_job)
def _annotations(self, path_map):
return self.device.get_annotations(path_map)
def annotations(self, done, path_map, add_as_step_to_job=None):
'''Return mapping of ids to annotations. Each annotation is of the
form (type, location_info, content). path_map is a mapping of
ids to paths on the device.'''
return self.create_job_step(self._annotations, done, args=[path_map],
description=_('Get annotations from device'), to_job=add_as_step_to_job)
def _sync_booklists(self, booklists):
'''Sync metadata to device'''
self.device.sync_booklists(booklists, end_session=False)
return self.device.card_prefix(end_session=False), self.device.free_space()
def sync_booklists(self, done, booklists, plugboards, add_as_step_to_job=None):
if hasattr(self.connected_device, 'set_plugboards') and \
callable(self.connected_device.set_plugboards):
self.connected_device.set_plugboards(plugboards, find_plugboard)
return self.create_job_step(self._sync_booklists, done, args=[booklists],
description=_('Send metadata to device'), to_job=add_as_step_to_job)
def upload_collections(self, done, booklist, on_card, add_as_step_to_job=None):
return self.create_job_step(booklist.rebuild_collections, done,
args=[booklist, on_card],
description=_('Send collections to device'),
to_job=add_as_step_to_job)
def _upload_books(self, files, names, on_card=None, metadata=None, plugboards=None):
'''Upload books to device: '''
from calibre.ebooks.metadata.meta import set_metadata
if hasattr(self.connected_device, 'set_plugboards') and \
callable(self.connected_device.set_plugboards):
self.connected_device.set_plugboards(plugboards, find_plugboard)
if metadata and files and len(metadata) == len(files):
for f, mi in zip(files, metadata):
if isinstance(f, unicode):
ext = f.rpartition('.')[-1].lower()
cpb = find_plugboard(
device_name_for_plugboards(self.connected_device),
ext, plugboards)
if ext:
try:
if DEBUG:
prints('Setting metadata in:', mi.title, 'at:',
f, file=sys.__stdout__)
with open(f, 'r+b') as stream:
if cpb:
newmi = mi.deepcopy_metadata()
newmi.template_to_attribute(mi, cpb)
else:
newmi = mi
nuke_comments = getattr(self.connected_device,
'NUKE_COMMENTS', None)
if nuke_comments is not None:
mi.comments = nuke_comments
set_metadata(stream, newmi, stream_type=ext)
except:
if DEBUG:
prints(traceback.format_exc(), file=sys.__stdout__)
try:
return self.device.upload_books(files, names, on_card,
metadata=metadata, end_session=False)
finally:
if metadata:
for mi in metadata:
try:
if mi.cover:
os.remove(mi.cover)
except:
pass
def upload_books(self, done, files, names, on_card=None, titles=None,
metadata=None, plugboards=None, add_as_step_to_job=None):
desc = _('Upload %d books to device')%len(names)
if titles:
desc += u':' + u', '.join(titles)
return self.create_job_step(self._upload_books, done, to_job=add_as_step_to_job,
args=[files, names],
kwargs={'on_card':on_card,'metadata':metadata,'plugboards':plugboards}, description=desc)
def add_books_to_metadata(self, locations, metadata, booklists):
self.device.add_books_to_metadata(locations, metadata, booklists)
def _delete_books(self, paths):
'''Remove books from device'''
self.device.delete_books(paths, end_session=True)
def delete_books(self, done, paths, add_as_step_to_job=None):
return self.create_job_step(self._delete_books, done, args=[paths],
description=_('Delete books from device'),
to_job=add_as_step_to_job)
def remove_books_from_metadata(self, paths, booklists):
self.device.remove_books_from_metadata(paths, booklists)
def _save_books(self, paths, target):
'''Copy books from device to disk'''
for path in paths:
name = sanitize_file_name2(os.path.basename(path))
dest = os.path.join(target, name)
if os.path.abspath(dest) != os.path.abspath(path):
with open(dest, 'wb') as f:
self.device.get_file(path, f)
def save_books(self, done, paths, target, add_as_step_to_job=None):
return self.create_job_step(self._save_books, done, args=[paths, target],
description=_('Download books from device'),
to_job=add_as_step_to_job)
def _view_book(self, path, target):
with open(target, 'wb') as f:
self.device.get_file(path, f)
return target
def view_book(self, done, path, target, add_as_step_to_job=None):
return self.create_job_step(self._view_book, done, args=[path, target],
description=_('View book on device'), to_job=add_as_step_to_job)
def set_current_library_uuid(self, uuid):
self.current_library_uuid = uuid
def set_driveinfo_name(self, location_code, name):
if self.connected_device:
self.connected_device.set_driveinfo_name(location_code, name)
# dynamic plugin interface
# This is a helper function that handles queueing with the device manager
def _call_request(self, name, method, *args, **kwargs):
d = self.dynamic_plugins.get(name, None)
if d:
return getattr(d, method)(*args, **kwargs)
return kwargs.get('default', None)
# The dynamic plugin methods below must be called on the GUI thread. They
# will switch to the device thread before calling the plugin.
def start_plugin(self, name):
return self._call_request(name, 'start_plugin')
def stop_plugin(self, name):
self._call_request(name, 'stop_plugin')
def get_option(self, name, opt_string, default=None):
return self._call_request(name, 'get_option', opt_string, default=default)
def set_option(self, name, opt_string, opt_value):
self._call_request(name, 'set_option', opt_string, opt_value)
def is_running(self, name):
if self._call_request(name, 'is_running'):
return True
return False
def is_enabled(self, name):
try:
d = self.dynamic_plugins.get(name, None)
if d:
return True
except:
pass
return False
# }}}
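# The dynamic-plugin helpers above (start_plugin, get_option, is_running, ...)
# are intended to be driven from the GUI thread.  A hedged sketch, with
# 'plugin_name' and 'opt_string' as placeholders rather than real plugin names:
#
#   if device_manager.is_enabled(plugin_name):
#       device_manager.start_plugin(plugin_name)
#       value = device_manager.get_option(plugin_name, opt_string, default=None)
#       if device_manager.is_running(plugin_name):
#           device_manager.stop_plugin(plugin_name)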
class DeviceAction(QAction): # {{{
a_s = pyqtSignal(object)
def __init__(self, dest, delete, specific, icon_path, text, parent=None):
QAction.__init__(self, QIcon(icon_path), text, parent)
self.dest = dest
self.delete = delete
self.specific = specific
self.triggered.connect(self.emit_triggered)
def emit_triggered(self, *args):
self.a_s.emit(self)
def __repr__(self):
return self.__class__.__name__ + ':%s:%s:%s'%(self.dest, self.delete,
self.specific)
# }}}
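# Note: DeviceMenu below persists the default send-to-device action by storing
# repr(action) in config and comparing against it later, so the saved value has
# the form '<class name>:<dest>:<delete>:<specific>'.  Illustrative only (the
# icon and text arguments are placeholders):
#
#   repr(DeviceAction('main:', False, False, I('reader.png'), 'Send to main memory'))
#   # -> 'DeviceAction:main::False:False'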
class DeviceMenu(QMenu): # {{{
fetch_annotations = pyqtSignal()
disconnect_mounted_device = pyqtSignal()
sync = pyqtSignal(object, object, object)
def __init__(self, parent=None):
QMenu.__init__(self, parent)
self.group = QActionGroup(self)
self._actions = []
self._memory = []
self.set_default_menu = QMenu(_('Set default send to device action'))
self.set_default_menu.setIcon(QIcon(I('config.png')))
basic_actions = [
('main:', False, False, I('reader.png'),
_('Send to main memory')),
('carda:0', False, False, I('sd.png'),
_('Send to storage card A')),
('cardb:0', False, False, I('sd.png'),
_('Send to storage card B')),
]
delete_actions = [
('main:', True, False, I('reader.png'),
_('Main Memory')),
('carda:0', True, False, I('sd.png'),
_('Storage Card A')),
('cardb:0', True, False, I('sd.png'),
_('Storage Card B')),
]
specific_actions = [
('main:', False, True, I('reader.png'),
_('Main Memory')),
('carda:0', False, True, I('sd.png'),
_('Storage Card A')),
('cardb:0', False, True, I('sd.png'),
_('Storage Card B')),
]
later_menus = []
for menu in (self, self.set_default_menu):
for actions, desc in (
(basic_actions, ''),
(specific_actions, _('Send specific format to')),
(delete_actions, _('Send and delete from library')),
):
mdest = menu
if actions is not basic_actions:
mdest = QMenu(desc)
self._memory.append(mdest)
later_menus.append(mdest)
if menu is self.set_default_menu:
menu.addMenu(mdest)
menu.addSeparator()
for dest, delete, specific, icon, text in actions:
action = DeviceAction(dest, delete, specific, icon, text, self)
self._memory.append(action)
if menu is self.set_default_menu:
action.setCheckable(True)
action.setText(action.text())
self.group.addAction(action)
else:
action.a_s.connect(self.action_triggered)
self._actions.append(action)
mdest.addAction(action)
if actions is basic_actions:
menu.addSeparator()
da = config['default_send_to_device_action']
done = False
for action in self.group.actions():
if repr(action) == da:
action.setChecked(True)
done = True
break
if not done:
action = list(self.group.actions())[0]
action.setChecked(True)
config['default_send_to_device_action'] = repr(action)
self.group.triggered.connect(self.change_default_action)
self.addSeparator()
self.addMenu(later_menus[0])
self.addSeparator()
mitem = self.addAction(QIcon(I('eject.png')), _('Eject device'))
mitem.setEnabled(False)
mitem.triggered.connect(lambda x : self.disconnect_mounted_device.emit())
self.disconnect_mounted_device_action = mitem
self.addSeparator()
self.addMenu(self.set_default_menu)
self.addSeparator()
self.addMenu(later_menus[1])
self.addSeparator()
annot = self.addAction(_('Fetch annotations (experimental)'))
annot.setEnabled(False)
annot.triggered.connect(lambda x :
self.fetch_annotations.emit())
self.annotation_action = annot
self.enable_device_actions(False)
def change_default_action(self, action):
config['default_send_to_device_action'] = repr(action)
action.setChecked(True)
def action_triggered(self, action):
self.sync.emit(action.dest, action.delete, action.specific)
def trigger_default(self, *args):
r = config['default_send_to_device_action']
for action in self._actions:
if repr(action) == r:
self.action_triggered(action)
break
def enable_device_actions(self, enable, card_prefix=(None, None),
device=None):
for action in self._actions:
if action.dest in ('main:', 'carda:0', 'cardb:0'):
if not enable:
action.setEnabled(False)
else:
if action.dest == 'main:':
action.setEnabled(True)
elif action.dest == 'carda:0':
if card_prefix and card_prefix[0] is not None:
action.setEnabled(True)
else:
action.setEnabled(False)
elif action.dest == 'cardb:0':
if card_prefix and card_prefix[1] is not None:
action.setEnabled(True)
else:
action.setEnabled(False)
annot_enable = enable and getattr(device, 'SUPPORTS_ANNOTATIONS', False)
self.annotation_action.setEnabled(annot_enable)
# }}}
class DeviceSignals(QObject): # {{{
#: This signal is emitted once, after metadata is downloaded from the
#: connected device.
#: The sequence: gui.device_manager.is_device_connected will become True,
#: and the device_connection_changed signal will be emitted,
#: then sometime later gui.device_metadata_available will be signaled.
#: This does not mean that there are no more jobs running. Automatic metadata
#: management might have kicked off a sync_booklists to write new metadata onto
#: the device, and that job might still be running when the signal is emitted.
device_metadata_available = pyqtSignal()
#: This signal is emitted once when the device is detected and once when
#: it is disconnected. If the parameter is True, then it is a connection,
#: otherwise a disconnection.
device_connection_changed = pyqtSignal(object)
device_signals = DeviceSignals()
# }}}
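# Hedged usage sketch (handler names are made up): other GUI components can
# subscribe to these module-level signals to react to connect/disconnect events
# and to the one-time metadata-available notification described above.
#
#   def _on_connection_changed(connected):
#       prints('device connected' if connected else 'device disconnected')
#
#   device_signals.device_connection_changed.connect(_on_connection_changed)
#   device_signals.device_metadata_available.connect(my_refresh_handler)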
class DeviceMixin(object): # {{{
def __init__(self, *args, **kwargs):
pass
def init_device_mixin(self):
self.device_error_dialog = error_dialog(self, _('Error'),
_('Error communicating with device'), ' ')
self.device_error_dialog.setModal(Qt.NonModal)
self.device_manager = DeviceManager(FunctionDispatcher(self.device_detected),
self.job_manager, Dispatcher(self.status_bar.show_message),
Dispatcher(self.show_open_feedback),
FunctionDispatcher(self.allow_connect), Dispatcher(self.after_callback_feedback))
self.device_manager.start()
self.device_manager.devices_initialized.wait()
if tweaks['auto_connect_to_folder']:
self.connect_to_folder_named(tweaks['auto_connect_to_folder'])
def allow_connect(self, name, icon):
return question_dialog(self, _('Manage the %s?')%name,
_('Detected the <b>%s</b>. Do you want calibre to manage it?')%
name, show_copy_button=False,
override_icon=QIcon(icon))
def after_callback_feedback(self, feedback):
title, msg, det_msg = feedback
        info_dialog(self, title, msg, det_msg=det_msg).show()
def debug_detection(self, done):
self.debug_detection_callback = weakref.ref(done)
self.device_manager.debug_detection(FunctionDispatcher(self.debug_detection_done))
def debug_detection_done(self, job):
d = self.debug_detection_callback()
if d is not None:
d(job)
def show_open_feedback(self, devname, e):
try:
self.__of_dev_mem__ = d = e.custom_dialog(self)
except NotImplementedError:
self.__of_dev_mem__ = d = info_dialog(self, devname, e.feedback_msg)
d.show()
def auto_convert_question(self, msg, autos):
autos = u'\n'.join(map(unicode, map(force_unicode, autos)))
return self.ask_a_yes_no_question(
_('No suitable formats'), msg,
ans_when_user_unavailable=True,
det_msg=autos, skip_dialog_name='auto_convert_before_send'
)
def set_default_thumbnail(self, height):
img = I('book.png', data=True)
self.default_thumbnail = thumbnail(img, height, height)
def connect_to_folder_named(self, folder):
if os.path.exists(folder) and os.path.isdir(folder):
self.device_manager.mount_device(kls=FOLDER_DEVICE, kind='folder',
path=folder)
def connect_to_folder(self):
dir = choose_dir(self, 'Select Device Folder',
_('Select folder to open as device'))
if dir is not None:
self.device_manager.mount_device(kls=FOLDER_DEVICE, kind='folder', path=dir)
def connect_to_bambook(self):
self.device_manager.mount_device(kls=BAMBOOKWifi, kind='bambook',
path=BAMBOOK.settings().extra_customization)
def connect_to_itunes(self):
self.device_manager.mount_device(kls=ITUNES_ASYNC, kind='itunes', path=None)
# disconnect from both folder and itunes devices
def disconnect_mounted_device(self):
self.device_manager.umount_device()
def configure_connected_device(self):
if not self.device_manager.is_device_connected:
return
if self.job_manager.has_device_jobs(queued_also=True):
return error_dialog(self, _('Running jobs'),
_('Cannot configure the device while there are running'
' device jobs.'), show=True)
dev = self.device_manager.connected_device
cw = dev.config_widget()
d = QDialog(self)
d.setWindowTitle(_('Configure %s')%dev.get_gui_name())
d.setWindowIcon(QIcon(I('config.png')))
l = QVBoxLayout(d)
d.setLayout(l)
bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
bb.accepted.connect(d.accept)
bb.rejected.connect(d.reject)
l.addWidget(cw)
l.addWidget(bb)
def validate():
if cw.validate():
QDialog.accept(d)
d.accept = validate
if d.exec_() == d.Accepted:
dev.save_settings(cw)
do_restart = show_restart_warning(_('Restart calibre for the changes to %s'
' to be applied.')%dev.get_gui_name(), parent=self)
if do_restart:
self.quit(restart=True)
def _sync_action_triggered(self, *args):
m = getattr(self, '_sync_menu', None)
if m is not None:
m.trigger_default()
def create_device_menu(self):
self._sync_menu = DeviceMenu(self)
self.iactions['Send To Device'].qaction.setMenu(self._sync_menu)
self.iactions['Connect Share'].build_email_entries()
self._sync_menu.sync.connect(self.dispatch_sync_event)
self._sync_menu.fetch_annotations.connect(
self.iactions['Fetch Annotations'].fetch_annotations)
self._sync_menu.disconnect_mounted_device.connect(self.disconnect_mounted_device)
self.iactions['Connect Share'].set_state(self.device_connected,
None)
if self.device_connected:
self._sync_menu.disconnect_mounted_device_action.setEnabled(True)
else:
self._sync_menu.disconnect_mounted_device_action.setEnabled(False)
def device_job_exception(self, job):
'''
Handle exceptions in threaded device jobs.
'''
if isinstance(getattr(job, 'exception', None), UserFeedback):
ex = job.exception
func = {UserFeedback.ERROR:error_dialog,
UserFeedback.WARNING:warning_dialog,
UserFeedback.INFO:info_dialog}[ex.level]
return func(self, _('Failed'), ex.msg, det_msg=ex.details if
ex.details else '', show=True)
try:
if 'Could not read 32 bytes on the control bus.' in \
unicode(job.details):
error_dialog(self, _('Error talking to device'),
_('There was a temporary error talking to the '
'device. Please unplug and reconnect the device '
'or reboot.')).show()
return
except:
pass
if getattr(job, 'exception', None).__class__.__name__ == 'MTPInvalidSendPathError':
try:
from calibre.gui2.device_drivers.mtp_config import SendError
return SendError(self, job.exception).exec_()
except:
traceback.print_exc()
try:
prints(job.details, file=sys.stderr)
except:
pass
if not self.device_error_dialog.isVisible():
self.device_error_dialog.set_details(job.details)
self.device_error_dialog.show()
# Device connected {{{
def set_device_menu_items_state(self, connected):
self.iactions['Connect Share'].set_state(connected,
self.device_manager.device)
if connected:
self._sync_menu.disconnect_mounted_device_action.setEnabled(True)
self._sync_menu.enable_device_actions(True,
self.device_manager.device.card_prefix(),
self.device_manager.device)
self.eject_action.setEnabled(True)
else:
self._sync_menu.disconnect_mounted_device_action.setEnabled(False)
self._sync_menu.enable_device_actions(False)
self.eject_action.setEnabled(False)
def device_detected(self, connected, device_kind):
'''
Called when a device is connected to the computer.
'''
# This can happen as this function is called in a queued connection and
# the user could have yanked the device in the meantime
if connected and not self.device_manager.is_device_connected:
connected = False
self.set_device_menu_items_state(connected)
if connected:
self.device_connected = device_kind
self.device_manager.get_device_information(
FunctionDispatcher(self.info_read))
self.set_default_thumbnail(
self.device_manager.device.THUMBNAIL_HEIGHT)
self.status_bar.show_message(_('Device: ')+
self.device_manager.device.get_gui_name()+
_(' detected.'), 3000)
self.library_view.set_device_connected(self.device_connected)
self.refresh_ondevice(reset_only=True)
else:
self.device_connected = None
self.status_bar.device_disconnected()
if self.current_view() != self.library_view:
self.book_details.reset_info()
self.location_manager.update_devices()
self.bars_manager.update_bars(reveal_bar=True)
self.library_view.set_device_connected(self.device_connected)
# Empty any device view information
self.memory_view.set_database([])
self.card_a_view.set_database([])
self.card_b_view.set_database([])
self.refresh_ondevice()
device_signals.device_connection_changed.emit(connected)
def info_read(self, job):
'''
Called once device information has been read.
'''
if job.failed:
return self.device_job_exception(job)
info, cp, fs = job.result
self.location_manager.update_devices(cp, fs,
self.device_manager.device.icon)
self.bars_manager.update_bars(reveal_bar=True)
self.status_bar.device_connected(info[0])
db = self.current_db
self.device_manager.set_library_information(None, os.path.basename(db.library_path),
db.library_id, db.field_metadata,
add_as_step_to_job=job)
self.device_manager.books(FunctionDispatcher(self.metadata_downloaded),
add_as_step_to_job=job)
def metadata_downloaded(self, job):
'''
Called once metadata has been read for all books on the device.
'''
if job.failed:
self.device_job_exception(job)
return
self.device_manager.slow_driveinfo()
# set_books_in_library might schedule a sync_booklists job
if DEBUG:
prints('DeviceJob: metadata_downloaded: Starting set_books_in_library')
self.set_books_in_library(job.result, reset=True, add_as_step_to_job=job)
if DEBUG:
prints('DeviceJob: metadata_downloaded: updating views')
mainlist, cardalist, cardblist = job.result
self.memory_view.set_database(mainlist)
self.memory_view.set_editable(self.device_manager.device.CAN_SET_METADATA,
self.device_manager.device.BACKLOADING_ERROR_MESSAGE
is None)
self.card_a_view.set_database(cardalist)
self.card_a_view.set_editable(self.device_manager.device.CAN_SET_METADATA,
self.device_manager.device.BACKLOADING_ERROR_MESSAGE
is None)
self.card_b_view.set_database(cardblist)
self.card_b_view.set_editable(self.device_manager.device.CAN_SET_METADATA,
self.device_manager.device.BACKLOADING_ERROR_MESSAGE
is None)
if DEBUG:
prints('DeviceJob: metadata_downloaded: syncing')
self.sync_news()
self.sync_catalogs()
if DEBUG:
prints('DeviceJob: metadata_downloaded: refreshing ondevice')
self.refresh_ondevice()
if DEBUG:
prints('DeviceJob: metadata_downloaded: sending metadata_available signal')
device_signals.device_metadata_available.emit()
def refresh_ondevice(self, reset_only=False):
'''
Force the library view to refresh, taking into consideration new
device books information
'''
with self.library_view.preserve_state():
self.book_on_device(None, reset=True)
if reset_only:
return
self.library_view.model().refresh_ondevice()
# }}}
def remove_paths(self, paths):
return self.device_manager.delete_books(
FunctionDispatcher(self.books_deleted), paths)
def books_deleted(self, job):
'''
Called once deletion is done on the device
'''
cv, row = self.current_view(), -1
if cv is not self.library_view:
row = cv.currentIndex().row()
for view in (self.memory_view, self.card_a_view, self.card_b_view):
view.model().deletion_done(job, job.failed)
if job.failed:
self.device_job_exception(job)
return
dm = self.iactions['Remove Books'].delete_memory
if job in dm:
paths, model = dm.pop(job)
self.device_manager.remove_books_from_metadata(paths,
self.booklists())
model.paths_deleted(paths)
        # Force recomputation of the library's ondevice info. We need to call
# set_books_in_library even though books were not added because
# the deleted book might have been an exact match. Upload the booklists
# if set_books_in_library did not.
if not self.set_books_in_library(self.booklists(), reset=True,
add_as_step_to_job=job, do_device_sync=False):
self.upload_booklists(job)
# We need to reset the ondevice flags in the library. Use a big hammer,
# so we don't need to worry about whether some succeeded or not.
self.refresh_ondevice()
if row > -1:
cv.set_current_row(row)
try:
if not self.current_view().currentIndex().isValid():
self.current_view().set_current_row()
self.current_view().refresh_book_details()
except:
traceback.print_exc()
def dispatch_sync_event(self, dest, delete, specific):
rows = self.library_view.selectionModel().selectedRows()
if not rows or len(rows) == 0:
error_dialog(self, _('No books'), _('No books')+' '+
_('selected to send')).exec_()
return
fmt = None
if specific:
if (not self.device_connected or not self.device_manager or
self.device_manager.device is None):
error_dialog(self, _('No device'),
_('No device connected'), show=True)
return
formats = []
aval_out_formats = available_output_formats()
format_count = {}
for row in rows:
fmts = self.library_view.model().db.formats(row.row())
if fmts:
for f in fmts.split(','):
f = f.lower()
if f in format_count:
format_count[f] += 1
else:
format_count[f] = 1
for f in self.device_manager.device.settings().format_map:
if f in format_count.keys():
formats.append((f, _('%(num)i of %(total)i Books') % dict(
num=format_count[f], total=len(rows)),
True if f in aval_out_formats else False))
elif f in aval_out_formats:
formats.append((f, _('0 of %i Books') % len(rows), True))
d = ChooseFormatDeviceDialog(self, _('Choose format to send to device'), formats)
if d.exec_() != QDialog.Accepted:
return
if d.format():
fmt = d.format().lower()
dest, sub_dest = dest.partition(':')[0::2]
if dest in ('main', 'carda', 'cardb'):
if not self.device_connected or not self.device_manager:
error_dialog(self, _('No device'),
_('Cannot send: No device is connected')).exec_()
return
if dest == 'carda' and not self.device_manager.has_card():
error_dialog(self, _('No card'),
_('Cannot send: Device has no storage card')).exec_()
return
if dest == 'cardb' and not self.device_manager.has_card():
error_dialog(self, _('No card'),
_('Cannot send: Device has no storage card')).exec_()
return
if dest == 'main':
on_card = None
else:
on_card = dest
self.sync_to_device(on_card, delete, fmt)
elif dest == 'mail':
sub_dest_parts = sub_dest.split(';')
while len(sub_dest_parts) < 3:
sub_dest_parts.append('')
to = sub_dest_parts[0]
fmts = sub_dest_parts[1]
subject = ';'.join(sub_dest_parts[2:])
fmts = [x.strip().lower() for x in fmts.split(',')]
self.send_by_mail(to, fmts, delete, subject=subject)
elif dest == 'choosemail':
from calibre.gui2.email import select_recipients
data = select_recipients(self)
if data:
self.send_multiple_by_mail(data, delete)
def cover_to_thumbnail(self, data):
if self.device_manager.device and \
hasattr(self.device_manager.device, 'THUMBNAIL_WIDTH'):
try:
return thumbnail(data,
self.device_manager.device.THUMBNAIL_WIDTH,
self.device_manager.device.THUMBNAIL_HEIGHT,
preserve_aspect_ratio=False)
except:
pass
return
ht = self.device_manager.device.THUMBNAIL_HEIGHT \
if self.device_manager else DevicePlugin.THUMBNAIL_HEIGHT
try:
return thumbnail(data, ht, ht,
compression_quality=self.device_manager.device.THUMBNAIL_COMPRESSION_QUALITY)
except:
pass
def sync_catalogs(self, send_ids=None, do_auto_convert=True):
if self.device_connected:
settings = self.device_manager.device.settings()
ids = list(dynamic.get('catalogs_to_be_synced', set([]))) if send_ids is None else send_ids
ids = [id for id in ids if self.library_view.model().db.has_id(id)]
with BusyCursor():
files, _auto_ids = self.library_view.model().get_preferred_formats_from_ids(
ids, settings.format_map,
exclude_auto=do_auto_convert)
auto = []
if do_auto_convert and _auto_ids:
for id in _auto_ids:
dbfmts = self.library_view.model().db.formats(id, index_is_id=True)
formats = [] if dbfmts is None else \
[f.lower() for f in dbfmts.split(',')]
if set(formats).intersection(available_input_formats()) \
and set(settings.format_map).intersection(available_output_formats()):
auto.append(id)
if auto:
format = None
for fmt in settings.format_map:
if fmt in list(set(settings.format_map).intersection(set(available_output_formats()))):
format = fmt
break
if format is not None:
autos = [self.library_view.model().db.title(id, index_is_id=True) for id in auto]
if self.auto_convert_question(
_('Auto convert the following books before uploading to '
'the device?'), autos):
self.iactions['Convert Books'].auto_convert_catalogs(auto, format)
files = [f for f in files if f is not None]
if not files:
dynamic.set('catalogs_to_be_synced', set([]))
return
metadata = self.library_view.model().metadata_for(ids)
names = []
for mi in metadata:
prefix = ascii_filename(mi.title)
if not isinstance(prefix, unicode):
prefix = prefix.decode(preferred_encoding, 'replace')
prefix = ascii_filename(prefix)
names.append('%s_%d%s'%(prefix, id,
os.path.splitext(f)[1]))
self.update_thumbnail(mi)
dynamic.set('catalogs_to_be_synced', set([]))
if files:
remove = []
space = {self.location_manager.free[0] : None,
self.location_manager.free[1] : 'carda',
self.location_manager.free[2] : 'cardb'}
on_card = space.get(sorted(space.keys(), reverse=True)[0], None)
self.upload_books(files, names, metadata,
on_card=on_card,
memory=[files, remove])
self.status_bar.show_message(_('Sending catalogs to device.'), 5000)
@dynamic_property
def news_to_be_synced(self):
doc = 'Set of ids to be sent to device'
def fget(self):
ans = []
try:
ans = self.library_view.model().db.prefs.get('news_to_be_synced',
[])
except:
import traceback
traceback.print_exc()
return set(ans)
def fset(self, ids):
try:
self.library_view.model().db.new_api.set_pref('news_to_be_synced',
list(ids))
except:
import traceback
traceback.print_exc()
return property(fget=fget, fset=fset, doc=doc)
def sync_news(self, send_ids=None, do_auto_convert=True):
if self.device_connected:
del_on_upload = config['delete_news_from_library_on_upload']
settings = self.device_manager.device.settings()
ids = list(self.news_to_be_synced) if send_ids is None else send_ids
ids = [id for id in ids if self.library_view.model().db.has_id(id)]
with BusyCursor():
files, _auto_ids = self.library_view.model().get_preferred_formats_from_ids(
ids, settings.format_map,
exclude_auto=do_auto_convert)
auto = []
if do_auto_convert and _auto_ids:
for id in _auto_ids:
dbfmts = self.library_view.model().db.formats(id, index_is_id=True)
formats = [] if dbfmts is None else \
[f.lower() for f in dbfmts.split(',')]
if set(formats).intersection(available_input_formats()) \
and set(settings.format_map).intersection(available_output_formats()):
auto.append(id)
if auto:
format = None
for fmt in settings.format_map:
if fmt in list(set(settings.format_map).intersection(set(available_output_formats()))):
format = fmt
break
if format is not None:
autos = [self.library_view.model().db.title(id, index_is_id=True) for id in auto]
if self.auto_convert_question(
_('Auto convert the following books before uploading to '
'the device?'), autos):
self.iactions['Convert Books'].auto_convert_news(auto, format)
files = [f for f in files if f is not None]
if not files:
self.news_to_be_synced = set([])
return
metadata = self.library_view.model().metadata_for(ids)
names = []
for mi in metadata:
prefix = ascii_filename(mi.title)
if not isinstance(prefix, unicode):
prefix = prefix.decode(preferred_encoding, 'replace')
prefix = ascii_filename(prefix)
names.append('%s_%d%s'%(prefix, id,
os.path.splitext(f)[1]))
self.update_thumbnail(mi)
self.news_to_be_synced = set([])
if config['upload_news_to_device'] and files:
remove = ids if del_on_upload else []
space = {self.location_manager.free[0] : None,
self.location_manager.free[1] : 'carda',
self.location_manager.free[2] : 'cardb'}
on_card = space.get(sorted(space.keys(), reverse=True)[0], None)
try:
total_size = sum([os.stat(f).st_size for f in files])
except:
try:
import traceback
traceback.print_exc()
except:
pass
total_size = self.location_manager.free[0]
loc = tweaks['send_news_to_device_location']
loc_index = {"carda": 1, "cardb": 2}.get(loc, 0)
if self.location_manager.free[loc_index] > total_size + (1024**2):
# Send news to main memory if enough space available
# as some devices like the Nook Color cannot handle
# periodicals on SD cards properly
on_card = loc if loc in ('carda', 'cardb') else None
self.upload_books(files, names, metadata,
on_card=on_card,
memory=[files, remove])
self.status_bar.show_message(_('Sending news to device.'), 5000)
def sync_to_device(self, on_card, delete_from_library,
specific_format=None, send_ids=None, do_auto_convert=True):
ids = [self.library_view.model().id(r)
for r in self.library_view.selectionModel().selectedRows()] \
if send_ids is None else send_ids
if not self.device_manager or not ids or len(ids) == 0 or \
not self.device_manager.is_device_connected:
return
settings = self.device_manager.device.settings()
with BusyCursor():
_files, _auto_ids = self.library_view.model().get_preferred_formats_from_ids(ids,
settings.format_map,
specific_format=specific_format,
exclude_auto=do_auto_convert)
if do_auto_convert:
ok_ids = list(set(ids).difference(_auto_ids))
ids = [i for i in ids if i in ok_ids]
else:
_auto_ids = []
metadata = self.library_view.model().metadata_for(ids)
ids = iter(ids)
for mi in metadata:
self.update_thumbnail(mi)
imetadata = iter(metadata)
bad, good, gf, names, remove_ids = [], [], [], [], []
for f in _files:
mi = imetadata.next()
id = ids.next()
if f is None:
bad.append(mi.title)
else:
remove_ids.append(id)
good.append(mi)
gf.append(f)
t = mi.title
if not t:
t = _('Unknown')
a = mi.format_authors()
if not a:
a = _('Unknown')
prefix = ascii_filename(t+' - '+a)
if not isinstance(prefix, unicode):
prefix = prefix.decode(preferred_encoding, 'replace')
prefix = ascii_filename(prefix)
names.append('%s_%d%s'%(prefix, id, os.path.splitext(f)[1]))
remove = remove_ids if delete_from_library else []
self.upload_books(gf, names, good, on_card, memory=(_files, remove))
self.status_bar.show_message(_('Sending books to device.'), 5000)
auto = []
if _auto_ids != []:
for id in _auto_ids:
if specific_format is None:
formats = self.library_view.model().db.formats(id, index_is_id=True)
formats = formats.split(',') if formats is not None else []
formats = [f.lower().strip() for f in formats]
if (list(set(formats).intersection(available_input_formats())) != [] and
list(set(settings.format_map).intersection(available_output_formats())) != []):
auto.append(id)
else:
bad.append(self.library_view.model().db.title(id, index_is_id=True))
else:
if specific_format in list(set(settings.format_map).intersection(set(available_output_formats()))):
auto.append(id)
else:
bad.append(self.library_view.model().db.title(id, index_is_id=True))
if auto != []:
format = specific_format if specific_format in \
list(set(settings.format_map).intersection(set(available_output_formats()))) \
else None
if not format:
for fmt in settings.format_map:
if fmt in list(set(settings.format_map).intersection(set(available_output_formats()))):
format = fmt
break
if not format:
bad += auto
else:
autos = [self.library_view.model().db.title(id, index_is_id=True) for id in auto]
if self.auto_convert_question(
_('Auto convert the following books before uploading to '
'the device?'), autos):
self.iactions['Convert Books'].auto_convert(auto, on_card, format)
if bad:
bad = '\n'.join('%s'%(i,) for i in bad)
d = warning_dialog(self, _('No suitable formats'),
_('Could not upload the following books to the device, '
'as no suitable formats were found. Convert the book(s) to a '
'format supported by your device first.'
), bad)
d.exec_()
def upload_dirtied_booklists(self):
'''
Upload metadata to device.
'''
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
self.device_manager.sync_booklists(Dispatcher(lambda x: x),
self.booklists(), plugboards)
def upload_booklists(self, add_as_step_to_job=None):
'''
Upload metadata to device.
'''
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
self.device_manager.sync_booklists(FunctionDispatcher(self.metadata_synced),
self.booklists(), plugboards,
add_as_step_to_job=add_as_step_to_job)
def metadata_synced(self, job):
'''
Called once metadata has been uploaded.
'''
if job.failed:
self.device_job_exception(job)
return
cp, fs = job.result
self.location_manager.update_devices(cp, fs,
self.device_manager.device.icon)
# reset the views so that up-to-date info is shown. These need to be
# here because some drivers update collections in sync_booklists
cv, row = self.current_view(), -1
if cv is not self.library_view:
row = cv.currentIndex().row()
self.memory_view.reset()
self.card_a_view.reset()
self.card_b_view.reset()
if row > -1:
cv.set_current_row(row)
def _upload_collections(self, job):
if job.failed:
self.device_job_exception(job)
def upload_collections(self, booklist, view=None, oncard=None):
return self.device_manager.upload_collections(self._upload_collections,
booklist, oncard)
def upload_books(self, files, names, metadata, on_card=None, memory=None):
'''
Upload books to device.
:param files: List of either paths to files or file like objects
'''
titles = [i.title for i in metadata]
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
job = self.device_manager.upload_books(
FunctionDispatcher(self.books_uploaded),
files, names, on_card=on_card,
metadata=metadata, titles=titles, plugboards=plugboards
)
self.upload_memory[job] = (metadata, on_card, memory, files)
def books_uploaded(self, job):
'''
Called once books have been uploaded.
'''
metadata, on_card, memory, files = self.upload_memory.pop(job)
if job.exception is not None:
if isinstance(job.exception, FreeSpaceError):
where = 'in main memory.' if 'memory' in str(job.exception) \
else 'on the storage card.'
titles = '\n'.join(['<li>'+mi.title+'</li>'
for mi in metadata])
d = error_dialog(self, _('No space on device'),
                    _('<p>Cannot upload books to device: there '
'is no more free space available ')+where+
'</p>\n<ul>%s</ul>'%(titles,))
d.exec_()
elif isinstance(job.exception, WrongDestinationError):
error_dialog(self, _('Incorrect destination'),
unicode(job.exception), show=True)
else:
self.device_job_exception(job)
return
try:
self.device_manager.add_books_to_metadata(job.result,
metadata, self.booklists())
except:
traceback.print_exc()
raise
books_to_be_deleted = []
if memory and memory[1]:
books_to_be_deleted = memory[1]
self.library_view.model().delete_books_by_id(books_to_be_deleted)
# There are some cases where sending a book to the device overwrites a
# book already there with a different book. This happens frequently in
# news. When this happens, the book match indication will be wrong
# because the UUID changed. Force both the device and the library view
# to refresh the flags. Set_books_in_library could upload the booklists.
# If it does not, then do it here.
if not self.set_books_in_library(self.booklists(), reset=True,
add_as_step_to_job=job, do_device_sync=False):
self.upload_booklists(job)
self.refresh_ondevice()
view = self.card_a_view if on_card == 'carda' else \
self.card_b_view if on_card == 'cardb' else self.memory_view
view.model().resort(reset=False)
view.model().research()
if files:
for f in files:
# Remove temporary files
try:
rem = not getattr(
self.device_manager.device,
'KEEP_TEMP_FILES_AFTER_UPLOAD', False)
if rem and 'caltmpfmt.' in f:
os.remove(f)
except:
pass
def update_metadata_on_device(self):
self.set_books_in_library(self.booklists(), reset=True, force_send=True)
self.refresh_ondevice()
def set_current_library_information(self, library_name, library_uuid, field_metadata):
self.device_manager.set_current_library_uuid(library_uuid)
if self.device_manager.is_device_connected:
self.device_manager.set_library_information(None, library_name,
library_uuid, field_metadata)
def book_on_device(self, id, reset=False):
'''
Return an indication of whether the given book represented by its db id
is on the currently connected device. It returns a 5 element list. The
first three elements represent memory locations main, carda, and cardb,
and are true if the book is identifiably in that memory. The fourth
is a count of how many instances of the book were found across all
the memory locations. The fifth is a set of paths to the
matching books on the device.
'''
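        # Illustrative shape of the returned list (values are made up, not
        # from a real device): a book found in main memory and on card A,
        # twice in total, at a single path:
        #   [True, True, None, 2, {'Books/Some Title.epub'}]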
loc = [None, None, None, 0, set([])]
if reset:
self.book_db_id_cache = None
self.book_db_id_counts = None
self.book_db_uuid_path_map = None
return
if not self.device_manager.is_device_connected or \
not hasattr(self, 'db_book_uuid_cache'):
return loc
if self.book_db_id_cache is None:
self.book_db_id_cache = []
self.book_db_id_counts = {}
self.book_db_uuid_path_map = {}
for i, l in enumerate(self.booklists()):
self.book_db_id_cache.append(set())
for book in l:
db_id = getattr(book, 'application_id', None)
if db_id is not None:
# increment the count of books on the device with this
# db_id.
self.book_db_id_cache[i].add(db_id)
if db_id not in self.book_db_uuid_path_map:
self.book_db_uuid_path_map[db_id] = set()
if getattr(book, 'lpath', False):
self.book_db_uuid_path_map[db_id].add(book.lpath)
c = self.book_db_id_counts.get(db_id, 0)
self.book_db_id_counts[db_id] = c + 1
for i, l in enumerate(self.booklists()):
if id in self.book_db_id_cache[i]:
loc[i] = True
loc[3] = self.book_db_id_counts.get(id, 0)
loc[4] |= self.book_db_uuid_path_map[id]
return loc
def update_thumbnail(self, book):
if book.cover and os.access(book.cover, os.R_OK):
book.thumbnail = self.cover_to_thumbnail(open(book.cover, 'rb').read())
else:
book.thumbnail = self.default_thumbnail
def set_books_in_library(self, booklists, reset=False, add_as_step_to_job=None,
force_send=False, do_device_sync=True):
'''
Set the ondevice indications in the device database.
This method should be called before book_on_device is called, because
it sets the application_id for matched books. Book_on_device uses that
to both speed up matching and to count matches.
'''
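        # Illustrative call order (not enforced by this module): this method
        # runs after a device connects or a sync job finishes, and the GUI then
        # calls book_on_device(db_id) per library row to show the device column.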
if not self.device_manager.is_device_connected:
return False
# It might be possible to get here without having initialized the
# library view. In this case, simply give up
try:
db = self.library_view.model().db
except:
return False
string_pat = re.compile('(?u)\W|[_]')
def clean_string(x):
x = x.lower() if x else ''
return string_pat.sub('', x)
update_metadata = (
device_prefs['manage_device_metadata'] == 'on_connect' or force_send)
get_covers = False
desired_thumbnail_height = 0
if update_metadata and self.device_manager.is_device_connected:
if self.device_manager.device.WANTS_UPDATED_THUMBNAILS:
get_covers = True
desired_thumbnail_height = self.device_manager.device.THUMBNAIL_HEIGHT
# Force a reset if the caches are not initialized
if reset or not hasattr(self, 'db_book_title_cache'):
            # Build a cache (map) of the library, so the search isn't O(n**2)
db_book_title_cache = {}
db_book_uuid_cache = {}
for id_ in db.data.iterallids():
title = clean_string(db.title(id_, index_is_id=True))
if title not in db_book_title_cache:
db_book_title_cache[title] = \
{'authors':{}, 'author_sort':{}, 'db_ids':{}}
# If there are multiple books in the library with the same title
# and author, then remember the last one. That is OK, because as
# we can't tell the difference between the books, one is as good
# as another.
authors = clean_string(db.authors(id_, index_is_id=True))
if authors:
db_book_title_cache[title]['authors'][authors] = id_
if db.author_sort(id_, index_is_id=True):
aus = clean_string(db.author_sort(id_, index_is_id=True))
db_book_title_cache[title]['author_sort'][aus] = id_
db_book_title_cache[title]['db_ids'][id_] = id_
db_book_uuid_cache[db.uuid(id_, index_is_id=True)] = id_
self.db_book_title_cache = db_book_title_cache
self.db_book_uuid_cache = db_book_uuid_cache
book_ids_to_refresh = set()
book_formats_to_send = []
books_with_future_dates = []
first_call_to_synchronize_with_db = [True]
def update_book(id_, book) :
if not update_metadata:
return
mi = db.get_metadata(id_, index_is_id=True, get_cover=get_covers)
book.smart_update(mi, replace_metadata=True)
if get_covers and desired_thumbnail_height != 0:
self.update_thumbnail(book)
def updateq(id_, book):
try:
if not update_metadata:
return False
if do_device_sync and self.device_manager.device is not None:
set_of_ids, (fmt_name, date_bad) = \
self.device_manager.device.synchronize_with_db(db, id_, book,
first_call_to_synchronize_with_db[0])
first_call_to_synchronize_with_db[0] = False
if date_bad:
books_with_future_dates.append(book.title)
elif fmt_name is not None:
book_formats_to_send.append((id_, fmt_name))
if set_of_ids is not None:
book_ids_to_refresh.update(set_of_ids)
return True
return (db.metadata_last_modified(id_, index_is_id=True) !=
getattr(book, 'last_modified', None) or
(isinstance(getattr(book, 'thumbnail', None), (list, tuple)) and
max(book.thumbnail[0], book.thumbnail[1]) != desired_thumbnail_height
)
)
except:
return True
# Now iterate through all the books on the device, setting the
# in_library field. If the UUID matches a book in the library, then
# do not consider that book for other matching. In all cases set
# the application_id to the db_id of the matching book. This value
        # will be used by book_on_device to indicate matches. While we are
# going by, update the metadata for a book if automatic management is on
total_book_count = 0
for booklist in booklists:
for book in booklist:
if book:
total_book_count += 1
if DEBUG:
prints('DeviceJob: set_books_in_library: books to process=', total_book_count)
start_time = time.time()
with BusyCursor():
current_book_count = 0
for booklist in booklists:
for book in booklist:
if current_book_count % 100 == 0:
self.status_bar.show_message(
_('Analyzing books on the device: %d%% finished')%(
int((float(current_book_count)/total_book_count)*100.0)), show_notification=False)
# I am assuming that this sort-of multi-threading won't break
# anything. Reasons: excluding UI events prevents the user
# from explicitly changing anything, and (in theory) no
# changes are happening because of timers and the like.
                    # Why every tenth book? A rough guess, balancing performance
                    # in the loop against preventing App Not Responding errors
if current_book_count % 10 == 0:
QCoreApplication.processEvents(
flags=QEventLoop.ExcludeUserInputEvents|QEventLoop.ExcludeSocketNotifiers)
current_book_count += 1
book.in_library = None
if getattr(book, 'uuid', None) in self.db_book_uuid_cache:
                        id_ = self.db_book_uuid_cache[book.uuid]
if updateq(id_, book):
update_book(id_, book)
book.in_library = 'UUID'
# ensure that the correct application_id is set
book.application_id = id_
continue
# No UUID exact match. Try metadata matching.
book_title = clean_string(book.title)
d = self.db_book_title_cache.get(book_title, None)
if d is not None:
# At this point we know that the title matches. The book
# will match if any of the db_id, author, or author_sort
# also match.
if getattr(book, 'application_id', None) in d['db_ids']:
id_ = getattr(book, 'application_id', None)
update_book(id_, book)
book.in_library = 'APP_ID'
# app_id already matches a db_id. No need to set it.
continue
# Sonys know their db_id independent of the application_id
# in the metadata cache. Check that as well.
if getattr(book, 'db_id', None) in d['db_ids']:
update_book(book.db_id, book)
book.in_library = 'DB_ID'
book.application_id = book.db_id
continue
# We now know that the application_id is not right. Set it
# to None to prevent book_on_device from accidentally
# matching on it. It will be set to a correct value below if
# the book is matched with one in the library
book.application_id = None
if book.authors:
# Compare against both author and author sort, because
# either can appear as the author
book_authors = clean_string(authors_to_string(book.authors))
if book_authors in d['authors']:
id_ = d['authors'][book_authors]
update_book(id_, book)
book.in_library = 'AUTHOR'
book.application_id = id_
elif book_authors in d['author_sort']:
id_ = d['author_sort'][book_authors]
update_book(id_, book)
book.in_library = 'AUTH_SORT'
book.application_id = id_
else:
# Book definitely not matched. Clear its application ID
book.application_id = None
# Set author_sort if it isn't already
asort = getattr(book, 'author_sort', None)
if not asort and book.authors:
book.author_sort = self.library_view.model().db.\
author_sort_from_authors(book.authors)
if update_metadata:
if self.device_manager.is_device_connected:
plugboards = self.library_view.model().db.prefs.get('plugboards', {})
self.device_manager.sync_booklists(
FunctionDispatcher(self.metadata_synced), booklists,
plugboards, add_as_step_to_job)
if book_ids_to_refresh:
try:
prints('DeviceJob: set_books_in_library refreshing GUI for ',
len(book_ids_to_refresh), 'books')
self.library_view.model().refresh_ids(book_ids_to_refresh,
current_row=self.library_view.currentIndex().row())
except:
# This shouldn't ever happen, but just in case ...
traceback.print_exc()
# Sync books if necessary
try:
files, names, metadata = [], [], []
for id_, fmt_name in book_formats_to_send:
if DEBUG:
prints('DeviceJob: Syncing book. id:', id_, 'name from device', fmt_name)
ext = os.path.splitext(fmt_name)[1][1:]
fmt_info = db.new_api.format_metadata(id_, ext)
if fmt_info:
try:
pt = PersistentTemporaryFile(suffix='caltmpfmt.'+ext)
db.new_api.copy_format_to(id_, ext, pt)
pt.close()
files.append(filename_to_unicode(os.path.abspath(pt.name)))
names.append(fmt_name)
mi = db.new_api.get_metadata(id_, get_cover=True)
self.update_thumbnail(mi)
metadata.append(mi)
except:
prints('Problem creating temporary file for', fmt_name)
traceback.print_exc()
else:
if DEBUG:
prints("DeviceJob: book doesn't have that format")
if files:
self.upload_books(files, names, metadata)
except:
# Shouldn't ever happen, but just in case
traceback.print_exc()
# Inform user about future-dated books
try:
if books_with_future_dates:
d = error_dialog(self, _('Book format sync problem'),
_('Some book formats in your library cannot be '
'synced because they have dates in the future'),
det_msg='\n'.join(books_with_future_dates),
show=False,
show_copy_button=True)
d.show()
except:
traceback.print_exc()
if DEBUG:
prints('DeviceJob: set_books_in_library finished: time=',
time.time() - start_time)
# The status line is reset when the job finishes
return update_metadata
# }}}
| gpl-3.0 | -3,263,801,130,236,667,400 | 43.265296 | 119 | 0.537903 | false |
xbmc/atv2 | xbmc/lib/libPython/Python/Mac/Modules/folder/folderscan.py | 5 | 2063 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
import os
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner_OSX
LONG = "Folders"
SHORT = "folder"
OBJECT = "NOTUSED"
def main():
input = LONG + ".h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
scanner.gentypetest(SHORT+"typetest.py")
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner_OSX):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[0]
# This is non-functional today
if t == OBJECT and m == "InMode":
classname = "Method"
listname = "methods"
return classname, listname
def makeblacklistnames(self):
return [
"FindFolderExtended", # Has funny void* argument
"FSFindFolderExtended", # ditto
"FolderManagerRegisterCallNotificationProcs", # ditto
"FindFolderEx", # Non-MacOS routine
]
def makeblacklisttypes(self):
return [
"FolderManagerNotificationProcPtr",
"FolderManagerNotificationUPP",
"FolderRouting", # To be done, not difficult
"FolderDesc", # To be done, not difficult
]
def makerepairinstructions(self):
return [
]
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
self.defsfile.write("true = True\n")
self.defsfile.write("false = False\n")
if __name__ == "__main__":
main()
| gpl-2.0 | 4,370,624,281,632,602,600 | 29.338235 | 85 | 0.587009 | false |
aarticianpc/greenpointtrees | src/paypal/payflow/facade.py | 9 | 6807 | """
Bridging module between Oscar and the gateway module (which is Oscar agnostic)
"""
from __future__ import unicode_literals
from oscar.apps.payment import exceptions
from paypal.payflow import gateway, models, codes
def authorize(order_number, amt, bankcard, billing_address=None):
"""
Make an *authorisation* request
This holds the money on the customer's bank account but does not mark the
transaction for settlement. This is the most common method to use for
fulfilling goods that require shipping. When the goods are ready to be
shipped, the transaction can be marked for settlement by calling the
delayed_capture method.
If successful, return nothing ("silence is golden") - if unsuccessful raise
an exception which can be caught and handled within view code.
:order_number: Order number for request
:amt: Amount for transaction
:bankcard: Instance of Oscar's Bankcard class (which is just a dumb wrapper
around the pertinent bankcard attributes).
:billing_address: A dict of billing address information (which can
come from the `cleaned_data` of a billing address form).
"""
return _submit_payment_details(
gateway.authorize, order_number, amt, bankcard, billing_address)
def sale(order_number, amt, bankcard, billing_address=None):
"""
Make a *sale* request
This holds the money on the customer's bank account and marks the
transaction for settlement that night. This is appropriate method to use
for products that can be immediately fulfilled - such as digital products.
If successful, return nothing ("silence is golden") - if unsuccessful raise
an exception which can be caught and handled within view code.
:order_number: Order number for request
:amt: Amount for transaction
:bankcard: Instance of Oscar's Bankcard class (which is just a dumb wrapper
around the pertinent bankcard attributes).
:billing_address: A dict of billing address information (which can come from
      the `cleaned_data` of a billing address form).
"""
return _submit_payment_details(gateway.sale, order_number, amt, bankcard,
billing_address)
def _submit_payment_details(
gateway_fn, order_number, amt, bankcard, billing_address=None):
# Remap address fields if set.
address_fields = {}
if billing_address:
address_fields.update({
'first_name': billing_address['first_name'],
'last_name': billing_address['last_name'],
'street': billing_address['line1'],
'city': billing_address['line4'],
'state': billing_address['state'],
'zip': billing_address['postcode'].strip(' ')
})
txn = gateway_fn(
order_number,
card_number=bankcard.number,
cvv=bankcard.cvv,
expiry_date=bankcard.expiry_month("%m%y"),
amt=amt,
**address_fields)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def delayed_capture(order_number, pnref=None, amt=None):
"""
Capture funds that have been previously authorized.
Notes:
* It's possible to capture a lower amount than the original auth
transaction - however...
* ...only one delayed capture is allowed for a given PNREF...
* ...If multiple captures are required, a 'reference transaction' needs to be
used.
* It's safe to retry captures if the first one fails or errors
:order_number: Order number
:pnref: The PNREF of the authorization transaction to use. If not
specified, the order number is used to retrieve the appropriate transaction.
:amt: A custom amount to capture.
"""
if pnref is None:
# No PNREF specified, look-up the auth transaction for this order number
# to get the PNREF from there.
try:
auth_txn = models.PayflowTransaction.objects.get(
comment1=order_number, trxtype=codes.AUTHORIZATION)
except models.PayflowTransaction.DoesNotExist:
raise exceptions.UnableToTakePayment(
"No authorization transaction found with PNREF=%s" % pnref)
pnref = auth_txn
txn = gateway.delayed_capture(order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def referenced_sale(order_number, pnref, amt):
"""
Capture funds using the bank/address details of a previous transaction
This is equivalent to a *sale* transaction but without the user having to
enter their payment details.
There are two main uses for this:
1. This allows customers to checkout without having to re-enter their
payment details.
2. It allows an initial authorisation to be settled in multiple parts. The
first settle should use delayed_capture but any subsequent ones should
use this method.
:order_number: Order number.
:pnref: PNREF of a previous transaction to use.
:amt: The amount to settle for.
"""
txn = gateway.reference_transaction(
order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.UnableToTakePayment(txn.respmsg)
return txn
def void(order_number, pnref):
"""
Void an authorisation transaction to prevent it from being settled
:order_number: Order number
:pnref: The PNREF of the transaction to void.
"""
txn = gateway.void(order_number, pnref)
if not txn.is_approved:
raise exceptions.PaymentError(txn.respmsg)
return txn
def credit(order_number, pnref=None, amt=None):
"""
Return funds that have been previously settled.
:order_number: Order number
:pnref: The PNREF of the authorization transaction to use. If not
specified, the order number is used to retrieve the appropriate transaction.
    :amt: A custom amount to refund. If not specified, the entire transaction
      is refunded.
"""
if pnref is None:
# No PNREF specified, look-up the auth/sale transaction for this order number
# to get the PNREF from there.
try:
auth_txn = models.PayflowTransaction.objects.get(
comment1=order_number, trxtype__in=(codes.AUTHORIZATION,
codes.SALE))
except models.PayflowTransaction.DoesNotExist:
raise exceptions.UnableToTakePayment(
"No authorization transaction found with PNREF=%s" % pnref)
pnref = auth_txn
txn = gateway.credit(order_number, pnref, amt)
if not txn.is_approved:
raise exceptions.PaymentError(txn.respmsg)
return txn
| mit | -7,567,328,304,063,662,000 | 36.401099 | 88 | 0.669899 | false |
smeissner/eden | modules/tests/volunteer/create_volunteer_certificate.py | 4 | 2743 | """ Sahana Eden Automated Test - HRM001 Create Volunteer Certificate
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from tests import *
#import unittest, re, time
import time
class CreateVolunteerCertificate(SeleniumUnitTest):
def test_hrm001_create_volunteer_certificate(self):
"""
@case: HRM001
@description: Create Volunteer Certificate
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
self.login(account="admin", nexturl="vol/certificate/create")
self.create("hrm_certificate",
[( "name",
"Advance First Aid ATest"
),
( "organisation_id",
"Timor-Leste Red Cross Society",
"autocomplete"),
( "expiry",
"12"
),
]
)
self.create("hrm_certificate_skill",
[( "skill_id",
"Hazmat",
"option"
),
( "competency_id",
"Level 2",
"option"),
]
) | mit | 439,445,761,913,301,600 | 37.111111 | 110 | 0.60226 | false |
ad-m/zgromadzenia-warszawa | server.py | 1 | 1324 | import requests
import requests_cache
import bs4
import datetime
import json
from icalendar import Calendar, Event # icalendar==3.9.0
requests_cache.configure('cache_database', expire_after=60*60)
headers = {'Content-Type': 'text/calendar; charset=utf-8',
'Content-Disposition': 'inline; filename=calendar.ics'}
def generate_calendar():
req = requests.get('http://bezpieczna.um.warszawa.pl/imprezy-masowe/zgromadzenia')
soup = bs4.BeautifulSoup(req.text)
trs = soup.find('table', attrs={'class': 'ViewsTable'}).findAll('tr')
label = [x.text.strip() for x in trs[0].findAll('th')]
cal = Calendar()
cal.add('prodid', '-//Zgromadzenia publiczne w Warszawie//jawne.info.pl//')
cal.add('version', '0.1.0')
for tr in trs[1:]:
date_string = tr.find('td').text.strip()
date = datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
values = [x.text.strip() for x in tr.findAll('td')]
text = json.dumps(dict(zip(label, values)), indent=4)
event = Event()
event.add('description', text)
event.add('dtstart', date)
event.add('dtend', date)
cal.add_component(event)
return cal.to_ical()
def application(environ, start_response):
start_response('200 OK', headers.items())
return [generate_calendar()]
| gpl-2.0 | -4,089,641,514,613,815,000 | 29.790698 | 86 | 0.648792 | false |
ramineni/my_congress | congress/dse2/control_bus.py | 1 | 6572 | # Copyright (c) 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import json
import time
import eventlet
eventlet.monkey_patch()
from oslo_log import log as logging
from congress.dse2 import data_service
LOG = logging.getLogger()
def drop_cast_echos(wrapped):
@functools.wraps(wrapped)
def wrapper(rpc_endpoint, message_context, *args, **kwargs):
node = rpc_endpoint.dse_bus.node
if message_context['node_id'] == node.node_id:
LOG.trace("<%s> Ignoring my echo", node.node_id)
return
return wrapped(rpc_endpoint, message_context, *args, **kwargs)
return wrapper
class HeartbeatEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
# Let the base class default method handle all other cases
return json.JSONEncoder.default(self, obj)
class _DseControlBusEndpoint(object):
def __init__(self, dse_bus):
self.dse_bus = dse_bus
@drop_cast_echos
def accept_heartbeat(self, client_ctxt, args):
LOG.debug("<%s> Accepted heartbeat: context=%s, args='%s'",
self.dse_bus.node.node_id, client_ctxt, args)
hb = json.loads(args)
# convert dict to set
for target in hb['subscribed_tables']:
hb['subscribed_tables'][target] = set(
hb['subscribed_tables'][target])
peer_id = client_ctxt['node_id']
new_status = {
'node_id': peer_id,
'instance': client_ctxt['instance'],
'services': hb['services'],
'subscribed_tables': hb['subscribed_tables']
}
old_status = self.dse_bus.peers.get(peer_id)
if old_status:
# TODO(pballand): validate instance, services
LOG.trace("<%s> Refreshed peer '%s' with services %s",
self.dse_bus.node.node_id, peer_id,
[s['service_id'] for s in new_status['services']])
else:
LOG.info("<%s> New peer '%s' with services %s",
self.dse_bus.node.node_id, peer_id,
[s['service_id'] for s in new_status['services']])
self.dse_bus.peers[peer_id] = new_status
# TODO(pballand): handle time going backwards
self.dse_bus.peers[peer_id]['last_hb_time'] = time.time()
# Note(thread-safety): blocking function
@drop_cast_echos
def list_services(self, client_ctxt):
LOG.debug("<%s> Peer '%s' requested updated service list",
self.dse_bus.node.node_id, client_ctxt['node_id'])
# Note(thread-safety): blocking call
self.dse_bus._publish_heartbeat()
class DseNodeControlBus(data_service.DataService):
"""Maintain DSE connection for a DseNode.
The DSE maintains a common directory of data services and their
corresponding exported tables and RPCs. This control bus maintains
this view using oslo.messaging RPC primitives.
"""
HEARTBEAT_INTERVAL = 1
def __init__(self, node):
self.node = node
self.control_bus_ep = _DseControlBusEndpoint(self)
self.peers = {}
super(DseNodeControlBus, self).__init__('_control_bus')
def rpc_endpoints(self):
return [self.control_bus_ep]
# Note(thread-safety): blocking function
def _publish_heartbeat(self):
args = json.dumps(
{'services': [s.info.to_dict()
for s in self.node.get_services(True)],
# FIXME(ekcs): suppress subscriber details for each subscribed
# table to avoid unnecessary network traffic. Only binary
# information needed over HB.
'subscribed_tables': self.node.subscriptions},
cls=HeartbeatEncoder)
# Note(thread-safety): blocking call
self.node.broadcast_service_rpc(self.service_id, 'accept_heartbeat',
{'args': args})
def _call_heartbeat_callbacks(self):
for service in self.node.get_services():
heartbeat_callbacks = service.heartbeat_callbacks.values()
for f in heartbeat_callbacks:
if not service._running:
break
# Note(thread-safety): potentially blocking call
f()
# Note(thread-safety): blocking function
def _heartbeat_loop(self):
while self._running:
self._publish_heartbeat()
self.node._update_tables_with_subscriber()
self._call_heartbeat_callbacks()
eventlet.sleep(self.HEARTBEAT_INTERVAL)
# Note(thread-safety): blocking function
def _refresh_peers(self):
# Request immediate status refresh from peers
LOG.debug("<%s> Requesting service list from all peers",
self.node.node_id)
self.node.broadcast_service_rpc(self.service_id, 'list_services')
# Note(thread-safety): blocking function
def start(self):
if self._running:
LOG.debug('control bus on %s already started.' % self.node.node_id)
return
LOG.debug("<%s> Starting DSE control bus", self.node.node_id)
super(DseNodeControlBus, self).start()
# TODO(pballand): ensure I am not currently running
# Add an instance UUID to the node status, have timeout on nodes
self._refresh_peers()
# TODO(pballand): before enabling self, check if my node ID is
# already present (no consensus service, so use timeout heuristic)
self._heartbeat_thread = eventlet.spawn(self._heartbeat_loop)
def stop(self):
LOG.debug("<%s> Stopping DSE control bus", self.node.node_id)
super(DseNodeControlBus, self).stop()
eventlet.greenthread.kill(self._heartbeat_thread)
def dse_status(self):
"""Return latest observation of DSE status."""
# TODO(pballand): include node status [JOINING, JOINED]
return {'peers': self.peers}
| apache-2.0 | -1,078,609,144,041,860,000 | 36.554286 | 79 | 0.617012 | false |
renyi533/tensorflow | tensorflow/python/ops/batch_norm_benchmark.py | 25 | 10767 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end benchmark for batch normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def batch_norm_op(tensor, mean, variance, beta, gamma, scale):
"""Fused kernel for batch normalization."""
# _batch_norm_with_global_normalization is deprecated in v9
test_util.set_producer_version(ops.get_default_graph(), 8)
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
tensor, mean, variance, beta, gamma, 0.001, scale)
# pylint: enable=protected-access
# Note that the naive implementation is much slower:
# batch_norm = (tensor - mean) * tf.math.rsqrt(variance + 0.001)
# if scale:
# batch_norm *= gamma
# return batch_norm + beta
def batch_norm_py(tensor, mean, variance, beta, gamma, scale):
"""Python implementation of batch normalization."""
return nn_impl.batch_normalization(tensor, mean, variance, beta, gamma if
scale else None, 0.001)
def batch_norm_slow(tensor, mean, variance, beta, gamma, scale):
batch_norm = (tensor - mean) * math_ops.rsqrt(variance + 0.001)
if scale:
batch_norm *= gamma
return batch_norm + beta
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
"""Build a graph containing a sequence of batch normalizations.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
Returns:
An array of tensors to run()
"""
moment_shape = []
keep_dims = mode == "py" or mode == "slow"
if keep_dims:
for axis in range(len(input_shape)):
if axis in axes:
moment_shape.append(1)
else:
moment_shape.append(input_shape[axis])
else:
for axis in range(len(input_shape)):
if axis not in axes:
moment_shape.append(input_shape[axis])
with ops.device("/%s:0" % device):
tensor = variables.Variable(random_ops.truncated_normal(input_shape))
for _ in range(num_layers):
if train:
mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
else:
mean = array_ops.zeros(moment_shape)
variance = array_ops.ones(moment_shape)
beta = variables.Variable(array_ops.zeros(moment_shape))
gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
if mode == "py":
tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)
elif mode == "op":
tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)
elif mode == "slow":
tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)
if train:
return gradients_impl.gradients([tensor], variables.trainable_variables())
else:
return [tensor]
def print_difference(mode, t1, t2):
"""Print the difference in timing between two runs."""
difference = (t2 - t1) / t1 * 100.0
print("=== %s: %.1f%% ===" % (mode, difference))
class BatchNormBenchmark(test.Benchmark):
"""Benchmark batch normalization."""
def _run_graph(self, device, input_shape, axes, num_layers, mode, scale,
train, num_iters):
"""Run the graph and print its execution time.
Args:
device: string, the device to run on.
input_shape: shape of the input tensor.
axes: axes that are to be normalized across.
num_layers: number of batch normalization layers in the graph.
mode: "op", "py" or "slow" depending on the implementation.
scale: scale after normalization.
train: if true, also run backprop.
num_iters: number of steps to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
outputs = build_graph(device, input_shape, axes, num_layers, mode, scale,
train)
with session_lib.Session(graph=graph) as session:
variables.global_variables_initializer().run()
_ = session.run([out.op for out in outputs]) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run([out.op for out in outputs])
duration = time.time() - start_time
print("%s shape:%d/%d #layers:%d mode:%s scale:%r train:%r - %f secs" %
(device, len(input_shape), len(axes), num_layers, mode, scale, train,
duration / num_iters))
name_template = (
"batch_norm_{device}_input_shape_{shape}_axes_{axes}_mode_{mode}_"
"layers_{num_layers}_scale_{scale}_"
"train_{train}")
self.report_benchmark(
name=name_template.format(
device=device,
mode=mode,
num_layers=num_layers,
scale=scale,
train=train,
shape=str(input_shape).replace(" ", ""),
axes=str(axes)).replace(" ", ""),
iters=num_iters,
wall_time=duration / num_iters)
return duration
def benchmark_batch_norm(self):
print("Forward convolution (lower layers).")
shape = [8, 128, 128, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (lower layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward convolution (higher layers).")
shape = [256, 17, 17, 32]
axes = [0, 1, 2]
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward/backward convolution (higher layers).")
t1 = self._run_graph("cpu", shape, axes, 10, "op", True, True, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 5)
t3 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 5)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "op", True, True, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 50)
t3 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 50)
print_difference("op vs py", t1, t2)
print_difference("py vs slow", t2, t3)
print("Forward fully-connected.")
shape = [1024, 32]
axes = [0]
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, False, 5)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, False, 5)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, False, 50)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, False, 50)
print_difference("py vs slow", t1, t2)
print("Forward/backward fully-connected.")
t1 = self._run_graph("cpu", shape, axes, 10, "py", True, True, 50)
t2 = self._run_graph("cpu", shape, axes, 10, "slow", True, True, 50)
print_difference("py vs slow", t1, t2)
if FLAGS.use_gpu:
t1 = self._run_graph("gpu", shape, axes, 10, "py", True, True, 5)
t2 = self._run_graph("gpu", shape, axes, 10, "slow", True, True, 5)
print_difference("py vs slow", t1, t2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--use_gpu",
type="bool",
nargs="?",
const=True,
default=True,
help="Run GPU benchmarks."
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
test.main(argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 2,334,822,461,502,336,000 | 39.939163 | 80 | 0.62998 | false |
klkuhlm/mpmath | mpmath/tests/test_special.py | 15 | 2848 | from mpmath import *
def test_special():
assert inf == inf
assert inf != -inf
assert -inf == -inf
assert inf != nan
assert nan != nan
assert isnan(nan)
assert --inf == inf
assert abs(inf) == inf
assert abs(-inf) == inf
assert abs(nan) != abs(nan)
assert isnan(inf - inf)
assert isnan(inf + (-inf))
assert isnan(-inf - (-inf))
assert isnan(inf + nan)
assert isnan(-inf + nan)
assert mpf(2) + inf == inf
assert 2 + inf == inf
assert mpf(2) - inf == -inf
assert 2 - inf == -inf
assert inf > 3
assert 3 < inf
assert 3 > -inf
assert -inf < 3
assert inf > mpf(3)
assert mpf(3) < inf
assert mpf(3) > -inf
assert -inf < mpf(3)
assert not (nan < 3)
assert not (nan > 3)
assert isnan(inf * 0)
assert isnan(-inf * 0)
assert inf * 3 == inf
assert inf * -3 == -inf
assert -inf * 3 == -inf
assert -inf * -3 == inf
assert inf * inf == inf
assert -inf * -inf == inf
assert isnan(nan / 3)
assert inf / -3 == -inf
assert inf / 3 == inf
assert 3 / inf == 0
assert -3 / inf == 0
assert 0 / inf == 0
assert isnan(inf / inf)
assert isnan(inf / -inf)
assert isnan(inf / nan)
assert mpf('inf') == mpf('+inf') == inf
assert mpf('-inf') == -inf
assert isnan(mpf('nan'))
assert isinf(inf)
assert isinf(-inf)
assert not isinf(mpf(0))
assert not isinf(nan)
def test_special_powers():
assert inf**3 == inf
assert isnan(inf**0)
assert inf**-3 == 0
assert (-inf)**2 == inf
assert (-inf)**3 == -inf
assert isnan((-inf)**0)
assert (-inf)**-2 == 0
assert (-inf)**-3 == 0
assert isnan(nan**5)
assert isnan(nan**0)
def test_functions_special():
assert exp(inf) == inf
assert exp(-inf) == 0
assert isnan(exp(nan))
assert log(inf) == inf
assert isnan(log(nan))
assert isnan(sin(inf))
assert isnan(sin(nan))
assert atan(inf).ae(pi/2)
assert atan(-inf).ae(-pi/2)
assert isnan(sqrt(nan))
assert sqrt(inf) == inf
def test_convert_special():
float_inf = 1e300 * 1e300
float_ninf = -float_inf
float_nan = float_inf/float_ninf
assert mpf(3) * float_inf == inf
assert mpf(3) * float_ninf == -inf
assert isnan(mpf(3) * float_nan)
assert not (mpf(3) < float_nan)
assert not (mpf(3) > float_nan)
assert not (mpf(3) <= float_nan)
assert not (mpf(3) >= float_nan)
assert float(mpf('1e1000')) == float_inf
assert float(mpf('-1e1000')) == float_ninf
assert float(mpf('1e100000000000000000')) == float_inf
assert float(mpf('-1e100000000000000000')) == float_ninf
assert float(mpf('1e-100000000000000000')) == 0.0
def test_div_bug():
assert isnan(nan/1)
assert isnan(nan/2)
assert inf/2 == inf
assert (-inf)/2 == -inf
| bsd-3-clause | -7,103,520,126,268,296,000 | 24.20354 | 60 | 0.566011 | false |
peterlauri/django | tests/postgres_tests/test_aggregates.py | 9 | 13416 | import json
from django.db.models.expressions import F, Value
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
try:
from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JsonAgg,
RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope,
RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg,
)
except ImportError:
pass # psycopg2 is not installed
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': [True, False, False, True]})
def test_array_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': []})
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': None})
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 0})
def test_bit_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': None})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': False})
def test_bool_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': None})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': True})
def test_bool_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': None})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})
def test_string_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': ''})
@skipUnlessDBFeature('has_jsonb_datatype')
def test_json_agg(self):
values = AggregateTestModel.objects.aggregate(jsonagg=JsonAgg('char_field'))
self.assertEqual(values, {'jsonagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})
@skipUnlessDBFeature('has_jsonb_datatype')
def test_json_agg_empty(self):
values = AggregateTestModel.objects.none().aggregate(jsonagg=JsonAgg('integer_field'))
self.assertEqual(values, json.loads('{"jsonagg": []}'))
class TestStringAggregateDistinct(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Foo')
AggregateTestModel.objects.create(char_field='Bar')
def test_string_agg_distinct_false(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=False))
self.assertEqual(values['stringagg'].count('Foo'), 2)
self.assertEqual(values['stringagg'].count('Bar'), 1)
def test_string_agg_distinct_true(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=True))
self.assertEqual(values['stringagg'].count('Foo'), 1)
self.assertEqual(values['stringagg'].count('Bar'), 1)
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x='test', y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = 'TEST'
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
# Test aggregates
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': -1.0})
def test_corr_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': None})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
def test_covar_pop_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': None})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': -1.0})
def test_covar_pop_sample_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': None})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': 2.0})
def test_regr_avgx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': None})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': 2.0})
def test_regr_avgy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': None})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 3})
def test_regr_count_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 0})
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': 4})
def test_regr_intercept_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': None})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': 1})
def test_regr_r2_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': None})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': -1})
def test_regr_slope_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': None})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': 2.0})
def test_regr_sxx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': None})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': -2.0})
def test_regr_sxy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': None})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': 2.0})
def test_regr_syy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': None})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
self.assertEqual(values, {'complex_regravgx': 1.0})
| bsd-3-clause | -639,002,158,699,216,800 | 43.571429 | 119 | 0.674195 | false |
brianwrf/mongo-python-driver | test/test_pooling.py | 8 | 14669 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test built in connection-pooling with threads."""
import gc
import random
import sys
import threading
import time
from pymongo import MongoClient
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
DuplicateKeyError,
ExceededMaxWaiters)
sys.path[0:0] = [""]
from pymongo.network import socket_closed
from pymongo.pool import Pool, PoolOptions
from test import host, port, SkipTest, unittest, client_context
from test.utils import (get_pool,
joinall,
delay,
one,
rs_or_single_client)
@client_context.require_connection
def setUpModule():
pass
N = 10
DB = "pymongo-pooling-tests"
def gc_collect_until_done(threads, timeout=60):
start = time.time()
running = list(threads)
while running:
assert (time.time() - start) < timeout, "Threads timed out"
for t in running:
t.join(0.1)
if not t.isAlive():
running.remove(t)
gc.collect()
class MongoThread(threading.Thread):
"""A thread that uses a MongoClient."""
def __init__(self, client):
super(MongoThread, self).__init__()
self.daemon = True # Don't hang whole test if thread hangs.
self.client = client
self.db = self.client[DB]
self.passed = False
def run(self):
self.run_mongo_thread()
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError
class InsertOneAndFind(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
rand = random.randint(0, N)
_id = self.db.sf.insert_one({"x": rand}).inserted_id
assert rand == self.db.sf.find_one(_id)["x"]
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.db.unique.insert_one({}) # no error
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
try:
self.db.unique.insert_one({"_id": "jesse"})
except DuplicateKeyError:
pass
else:
raise AssertionError("Should have raised DuplicateKeyError")
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.client.close()
class SocketGetter(MongoThread):
"""Utility for TestPooling.
Checks out a socket and holds it forever. Used in
test_no_wait_queue_timeout, test_wait_queue_multiple, and
test_no_wait_queue_multiple.
"""
def __init__(self, client, pool):
super(SocketGetter, self).__init__(client)
self.state = 'init'
self.pool = pool
self.sock = None
def run_mongo_thread(self):
self.state = 'get_socket'
# Pass 'checkout' so we can hold the socket.
with self.pool.get_socket({}, checkout=True) as sock:
self.sock = sock
self.state = 'sock'
def __del__(self):
if self.sock:
self.sock.close()
def run_cases(client, cases):
threads = []
n_runs = 5
for case in cases:
for i in range(n_runs):
t = case(client)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run() threw an exception" % repr(t)
class _TestPoolingBase(unittest.TestCase):
"""Base class for all connection-pool tests."""
def setUp(self):
self.c = rs_or_single_client()
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert_one({"_id": "jesse"})
db.test.insert_many([{} for _ in range(10)])
def create_pool(self, pair=(host, port), *args, **kwargs):
return Pool(pair, PoolOptions(*args, **kwargs))
class TestPooling(_TestPoolingBase):
def test_max_pool_size_validation(self):
self.assertRaises(
ValueError, MongoClient, host=host, port=port,
maxPoolSize=-1)
self.assertRaises(
ValueError, MongoClient, host=host, port=port,
maxPoolSize='foo')
c = MongoClient(host=host, port=port, maxPoolSize=100)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self.c, [NonUnique, Unique, InsertOneAndFind])
def test_disconnect(self):
run_cases(self.c, [InsertOneAndFind, Disconnect, Unique])
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket.
cx_pool = self.create_pool(max_pool_size=10)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
pass
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_get_socket_and_exception(self):
# get_socket() returns socket after a non-network error.
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
with self.assertRaises(ZeroDivisionError):
with cx_pool.get_socket({}) as sock_info:
1 / 0
# Socket was returned, not closed.
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_closed_socket(self):
# Test that Pool removes explicitly closed socket.
cx_pool = self.create_pool()
with cx_pool.get_socket({}) as sock_info:
# Use SocketInfo's API to close the socket.
sock_info.close()
self.assertEqual(0, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
        # Test that Pool removes a dead socket and that the socket isn't
        # returned to the pool (PYTHON-344).
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
self.assertTrue(socket_closed(sock_info.sock))
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
# Semaphore was released.
with cx_pool.get_socket({}):
pass
def test_socket_closed(self):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 27017))
self.assertFalse(socket_closed(s))
s.close()
self.assertTrue(socket_closed(s))
def test_return_socket_after_reset(self):
pool = self.create_pool()
with pool.get_socket({}) as sock:
pool.reset()
self.assertTrue(sock.closed)
self.assertEqual(0, len(pool.sockets))
def test_pool_check(self):
# Test that Pool recovers from two connection failures in a row.
# This exercises code at the end of Pool._check().
cx_pool = self.create_pool(max_pool_size=1,
connect_timeout=1,
wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
# Swap pool's address with a bad one.
address, cx_pool.address = cx_pool.address, ('foo.com', 1234)
with self.assertRaises(AutoReconnect):
with cx_pool.get_socket({}):
pass
# Back to normal, semaphore was correctly released.
cx_pool.address = address
with cx_pool.get_socket({}, checkout=True) as sock_info:
pass
sock_info.close()
def test_wait_queue_timeout(self):
wait_queue_timeout = 2 # Seconds
pool = self.create_pool(
max_pool_size=1, wait_queue_timeout=wait_queue_timeout)
with pool.get_socket({}) as sock_info:
start = time.time()
with self.assertRaises(ConnectionFailure):
with pool.get_socket({}):
pass
duration = time.time() - start
self.assertTrue(
abs(wait_queue_timeout - duration) < 1,
"Waited %.2f seconds for a socket, expected %f" % (
duration, wait_queue_timeout))
sock_info.close()
def test_no_wait_queue_timeout(self):
# Verify get_socket() with no wait_queue_timeout blocks forever.
pool = self.create_pool(max_pool_size=1)
# Reach max_size.
with pool.get_socket({}) as s1:
t = SocketGetter(self.c, pool)
t.start()
while t.state != 'get_socket':
time.sleep(0.1)
time.sleep(1)
self.assertEqual(t.state, 'get_socket')
while t.state != 'sock':
time.sleep(0.1)
self.assertEqual(t.state, 'sock')
self.assertEqual(t.sock, s1)
s1.close()
def test_wait_queue_multiple(self):
wait_queue_multiple = 3
pool = self.create_pool(
max_pool_size=2, wait_queue_multiple=wait_queue_multiple)
# Reach max_size sockets.
with pool.get_socket({}):
with pool.get_socket({}):
# Reach max_size * wait_queue_multiple waiters.
threads = []
for _ in range(6):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
with self.assertRaises(ExceededMaxWaiters):
with pool.get_socket({}):
pass
def test_no_wait_queue_multiple(self):
pool = self.create_pool(max_pool_size=2)
socks = []
for _ in range(2):
# Pass 'checkout' so we can hold the socket.
with pool.get_socket({}, checkout=True) as sock:
socks.append(sock)
threads = []
for _ in range(30):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
for socket_info in socks:
socket_info.close()
class TestPoolMaxSize(_TestPoolingBase):
def test_max_pool_size(self):
max_pool_size = 4
c = rs_or_single_client(maxPoolSize=max_pool_size)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
# nthreads had better be much larger than max_pool_size to ensure that
# max_pool_size sockets are actually required at some point in this
# test's execution.
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
assert len(cx_pool.sockets) <= max_pool_size
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
def test_max_pool_size_none(self):
c = rs_or_single_client(maxPoolSize=None)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
def test_max_pool_size_zero(self):
with self.assertRaises(ValueError):
rs_or_single_client(maxPoolSize=0)
def test_max_pool_size_with_connection_failure(self):
# The pool acquires its semaphore before attempting to connect; ensure
# it releases the semaphore on connection failure.
test_pool = Pool(
('example.com', 27017),
PoolOptions(
max_pool_size=1,
connect_timeout=1,
socket_timeout=1,
wait_queue_timeout=1))
# First call to get_socket fails; if pool doesn't release its semaphore
# then the second call raises "ConnectionFailure: Timed out waiting for
# socket from pool" instead of AutoReconnect.
for i in range(2):
with self.assertRaises(AutoReconnect) as context:
with test_pool.get_socket({}, checkout=True):
pass
# Testing for AutoReconnect instead of ConnectionFailure, above,
# is sufficient right *now* to catch a semaphore leak. But that
# seems error-prone, so check the message too.
self.assertNotIn('waiting for socket from pool',
str(context.exception))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -4,247,735,235,925,133,300 | 30.210638 | 79 | 0.572568 | false |
rl-institut/reegis_hp | reegis_hp/de21/config.py | 3 | 3003 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 12:26:40 2014
:module-author: steffen
:filename: config.py
This module provides a high-level layer for reading and writing config files.
There must be a file called "config.ini" in the root folder of the project.
The file has to be of the following structure to be imported correctly.
# this is a comment \n
# the file structure is like: \n
\n
[netCDF] \n
RootFolder = c://netCDF \n
FilePrefix = cd2_ \n
\n
[mySQL] \n
host = localhost \n
user = guest \n
password = root \n
database = znes \n
\n
[SectionName] \n
OptionName = value \n
Option2 = value2 \n
"""
import os
import logging
import configparser as cp
import configuration
FILENAME = 'config.ini'
FILE = os.path.join(os.path.expanduser("~"), '.oemof', FILENAME)
cfg = cp.RawConfigParser()
_loaded = False
def load_config(filename=None):
"""
Load data from config file to `cfg` that can be accessed by get, set
afterwards.
Specify absolute or relative path to your config file.
:param filename: Relative or absolute path
:type filename: str or list
"""
# load config
global FILE
if filename is not None:
FILE = filename
init(FILE)
def main():
pass
def init(file):
"""
Read config file
Parameters
----------
file : str or list
Absolute path to config file (incl. filename)
"""
cfg.read(file)
global _loaded
_loaded = True
def get(section, key):
"""
returns the value of a given key of a given section of the main
config file.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
    :returns: the value, which will be cast to float, int or boolean.
              If no cast is successful, the raw string will be returned.
"""
# FILE = 'config_misc'
if not _loaded:
init(FILE)
try:
return cfg.getint(section, key)
except ValueError:
try:
return cfg.getfloat(section, key)
except ValueError:
try:
return cfg.getboolean(section, key)
except ValueError:
try:
value = cfg.get(section, key)
if value == 'None':
value = None
return value
except ValueError:
logging.error(
"section {0} with key {1} not found in {2}".format(
section, key, FILE))
return cfg.get(section, key)
def get_list(section, parameter):
try:
my_list = cfg.get(section, parameter).split(',')
my_list = [x.strip() for x in my_list]
except AttributeError:
my_list = list((cfg.get(section, parameter),))
return my_list
def set(section, key, value):
return cfg.set(section, key, value)
print('Loading de21 configuration....')
configuration.de21_configuration()
if __name__ == "__main__":
main()
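# Illustrative usage sketch (added comment, not part of the original module).
# The path below is hypothetical; the [mySQL] section matches the structure
# shown in the module docstring:
#
#     import config
#     config.load_config("/path/to/config.ini")
#     host = config.get("mySQL", "host")        # cast to int/float/bool when possible,
#                                                # otherwise returned as a raw string
#     users = config.get_list("mySQL", "user")   # always returns a list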
| gpl-3.0 | 3,678,157,836,786,666,000 | 20.76087 | 77 | 0.594073 | false |
mojeto/django | django/utils/translation/__init__.py | 16 | 7550 | """
Internationalization support.
"""
import re
import warnings
from contextlib import ContextDecorator
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
# An alias since Django 2.0
ugettext = gettext
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
# An alias since Django 2.0
ungettext = ngettext
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = ugettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
# An alias since Django 2.0
ungettext_lazy = ngettext_lazy
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
warnings.warn(
        'django.utils.translation.string_concat() is deprecated in '
'favor of django.utils.text.format_lazy().',
RemovedInDjango21Warning, stacklevel=2)
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, str)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = gettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
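# Hedged usage sketch (added for illustration; not part of Django itself):
#
#     from django.utils.translation import gettext_lazy as _, override
#
#     greeting = _("Welcome")       # lazy: looked up when the string is rendered
#     with override('de'):
#         text = str(greeting)      # resolved against the active ('de') catalog
#
# As the Trans docstring above notes, tests that change USE_I18N can delete the
# cached functions from _trans.__dict__ to force the dispatch to be redone.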
| bsd-3-clause | 7,985,281,834,317,374,000 | 27.927203 | 104 | 0.646623 | false |
sarvex/tensorflow | tensorflow/python/ops/ragged/__init__.py | 6 | 1409 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ragged Tensors.
This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),
which are tensors with non-uniform shapes. In particular, each `RaggedTensor`
has one or more *ragged dimensions*, which are dimensions whose slices may have
different lengths. For example, the inner (column) dimension of
`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices
(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed
description of ragged tensors, see the `tf.RaggedTensor` class documentation
and the [Ragged Tensor Guide](/guide/ragged_tensor).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
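# Hedged illustration (added comment, not part of the original file): the ragged
# tensor used as the example in the docstring above could be built with
#
#     import tensorflow as tf
#     rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
#     rt.row_lengths()   # per-row lengths of the ragged dimension: 4, 0, 3, 1, 0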
| apache-2.0 | -1,746,021,931,730,091,300 | 47.586207 | 80 | 0.700497 | false |
jcurbelo/networkx | networkx/algorithms/bipartite/matrix.py | 12 | 6616 | # -*- coding: utf-8 -*-
"""
====================
Biadjacency matrices
====================
"""
# Copyright (C) 2013-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import itertools
from networkx.convert import _prep_create_using
from networkx.convert_matrix import _generate_weighted_edges
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <[email protected]>',
'Aric Hagberg <[email protected]>'])
__all__ = ['biadjacency_matrix','from_biadjacency_matrix']
def biadjacency_matrix(G, row_order, column_order=None,
dtype=None, weight='weight', format='csr'):
r"""Return the biadjacency matrix of the bipartite graph G.
Let `G = (U, V, E)` be a bipartite graph with node sets
`U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
not `None` and matches the name of an edge attribute, its value is
used instead of 1.
Parameters
----------
G : graph
A NetworkX graph
row_order : list of nodes
The rows of the matrix are ordered according to the list of nodes.
column_order : list, optional
The columns of the matrix are ordered according to the list of nodes.
If column_order is None, then the ordering of columns is arbitrary.
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None, optional (default='weight')
The edge data key used to provide each value in the matrix.
If None, then each edge has weight 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [2]_ for details.
Returns
-------
M : SciPy sparse matrix
Biadjacency matrix representation of the bipartite graph G.
Notes
-----
No attempt is made to check that the input graph is bipartite.
For directed bipartite graphs only successors are considered as neighbors.
To obtain an adjacency matrix with ones (or weight values) for both
predecessors and successors you have to generate two biadjacency matrices
where the rows of one of them are the columns of the other, and then add
one to the transpose of the other.
See Also
--------
adjacency_matrix
from_biadjacency_matrix
References
----------
.. [1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
.. [2] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
nlen = len(row_order)
if nlen == 0:
raise nx.NetworkXError("row_order is empty list")
if len(row_order) != len(set(row_order)):
msg = "Ambiguous ordering: `row_order` contained duplicates."
raise nx.NetworkXError(msg)
if column_order is None:
column_order = list(set(G) - set(row_order))
mlen = len(column_order)
if len(column_order) != len(set(column_order)):
msg = "Ambiguous ordering: `column_order` contained duplicates."
raise nx.NetworkXError(msg)
row_index = dict(zip(row_order, itertools.count()))
col_index = dict(zip(column_order, itertools.count()))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((row_index[u],col_index[v],d.get(weight,1))
for u,v,d in G.edges(row_order,data=True)
if u in row_index and v in col_index))
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,mlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def from_biadjacency_matrix(A, create_using=None, edge_attribute='weight'):
r"""Creates a new bipartite graph from a biadjacency matrix given as a
SciPy sparse matrix.
Parameters
----------
A: scipy sparse matrix
A biadjacency matrix representation of a graph
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
The nodes are labeled with the attribute `bipartite` set to an integer
0 or 1 representing membership in part 0 or part 1 of the bipartite graph.
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph` and the entries of `A` are of type ``int``,
then this function returns a multigraph (of the same type as
`create_using`) with parallel edges. In this case, `edge_attribute` will be
ignored.
See Also
--------
biadjacency_matrix
from_numpy_matrix
References
----------
[1] http://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
"""
G = _prep_create_using(create_using)
n, m = A.shape
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n), bipartite=0)
G.add_nodes_from(range(n,n+m), bipartite=1)
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = ((u, n+v, d) for (u, v, d) in _generate_weighted_edges(A))
# If the entries in the adjacency matrix are integers and the graph is a
# multigraph, then create parallel edges, each with weight 1, for each
# entry in the adjacency matrix. Otherwise, create one edge for each
# positive entry in the adjacency matrix and set the weight of that edge to
# be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph():
chain = itertools.chain.from_iterable
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
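# Companion sketch for from_biadjacency_matrix (added comment): a 2x3 sparse
# matrix yields a bipartite graph whose rows become nodes 0..1 (bipartite=0)
# and whose columns become nodes 2..4 (bipartite=1), e.g.
#
#     >>> from scipy import sparse
#     >>> A = sparse.coo_matrix([[1, 1, 0], [0, 1, 1]])
#     >>> G = from_biadjacency_matrix(A)
#     >>> sorted(G.edges())
#     [(0, 2), (0, 3), (1, 3), (1, 4)]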
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import scipy
except:
raise SkipTest("SciPy not available")
| bsd-3-clause | 5,992,709,365,052,237,000 | 36.378531 | 94 | 0.639359 | false |
flyapen/UgFlu | common/reflectcall.py | 2 | 4188 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""getting coherent errors when calling procedures in named modules
"""
from twisted.python import reflect
from flumotion.common import errors, log
__version__ = "$Rev: 7162 $"
def reflectCall(moduleName, methodName, *args, **kwargs):
"""
@param moduleName: name of the module to load
@type moduleName: string
@param methodName: name of the function to call
@type methodName: string
Invokes a function in a given module.
"""
log.debug('reflectcall', 'Loading moduleName %s', moduleName)
module = reflect.namedModule(moduleName)
log.debug('reflectcall', 'calling method %s.%s', moduleName,
methodName)
proc = getattr(module, methodName)
return proc(*args, **kwargs)
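# Hedged example (added comment): reflectCall simply resolves the module with
# twisted's reflect.namedModule and calls the named attribute, e.g.
#
#     reflectCall("os.path", "join", "tmp", "log")   # -> "tmp/log" on POSIX
#
# reflectCallCatching (below) does the same, but re-raises any failure as the
# caller-supplied error type, as createComponent does with ComponentCreateError.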
def reflectCallCatching(err, moduleName, methodName, *args, **kwargs):
"""
@param err: The type of error to throw
@type err: Exception
@param moduleName: name of the module to load
@type moduleName: string
@param methodName: name of the function to call
@type methodName: string
Invokes a function in a given module, marshalling all errors to be
of a certain type.
"""
log.debug('reflectcall', 'Loading moduleName %s' % moduleName)
try:
module = reflect.namedModule(moduleName)
except ValueError:
raise err("module %s could not be found" % moduleName)
except SyntaxError, e:
raise err("module %s has a syntax error in %s:%d"
% (moduleName, e.filename, e.lineno))
except ImportError, e:
# FIXME: basically this is the same as the generic one below...
raise err("module %s could not be imported (%s)"
% (moduleName,
log.getExceptionMessage(e, filename='flumotion')))
except Exception, e:
raise err("module %s could not be imported (%s)"
% (moduleName,
log.getExceptionMessage(e, filename='flumotion')))
if not hasattr(module, methodName):
raise err("module %s has no method named %s"
% (moduleName, methodName))
log.debug('reflectcall', 'calling method %s.%s'
% (moduleName, methodName))
try:
ret = getattr(module, methodName)(*args, **kwargs)
except err:
# already nicely formatted, so fall through
log.debug('reflectcall', 'letting error fall through')
raise
except Exception, e:
msg = log.getExceptionMessage(e)
log.warning('reflectcall', msg)
log.warning('reflectcall', 'raising error')
raise err(msg)
log.debug('reflectcall', 'returning %r' % ret)
return ret
def createComponent(moduleName, methodName, config):
"""
@param moduleName: name of the module to create the component from
@type moduleName: string
@param methodName: the factory method to use to create the component
@type methodName: string
@param config: the component's config dict
@type config: dict
Invokes the entry point for a component in the given module using the
given factory method, thus creating the component.
@rtype: L{flumotion.component.component.BaseComponent}
"""
return reflectCallCatching(errors.ComponentCreateError,
moduleName, methodName, config)
| gpl-2.0 | 4,832,686,532,774,960,000 | 32.774194 | 74 | 0.670726 | false |
owaiskhan/Retransmission-Combining | gr-qtgui/python/qa_qtgui.py | 10 | 1657 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import qtgui_swig
class test_qtgui(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test01 (self):
# Test to make sure we can instantiate the sink
self.qtsnk = qtgui_swig.sink_c(1024, gr.firdes.WIN_BLACKMAN_hARRIS,
0, 1, "Test",
True, True, True, True)
def test02 (self):
# Test to make sure we can instantiate the sink
self.qtsnk = qtgui_swig.sink_f(1024, gr.firdes.WIN_BLACKMAN_hARRIS,
0, 1, "Test",
True, True, True, True)
if __name__ == '__main__':
gr_unittest.run(test_qtgui, "test_qtgui.xml")
| gpl-3.0 | -7,476,349,177,200,291,000 | 33.520833 | 76 | 0.620398 | false |
mpmendenhall/rotationshield | Scripts/StudyPlotter.py | 1 | 13111 | #!/sw/bin/python2.7
import os
import pyx
from pyx import *
from pyx.color import rgb, hsb
from pyx.graph.style import symbol
from math import *
from EDM_IO import *
from LinFitter import *
from ArrowPlotter import *
from polynomial import *
from QFile import *
from EDM_Distortion_Impact import *
def rainbow(n):
return [ hsb((1.0*x)/n,1,1) for x in range(n) ]
def rainbowDict(keys):
n = len(keys)
knew = [k for k in keys]
knew.sort()
return dict([ (k,hsb((1.0*x)/n,1,1)) for (x,k) in enumerate(knew) ])
class cell_geom(KVMap):
"""Data cell geometry"""
def __init__(self,m=None):
self.ll = self.ur = self.c = None
if m:
self.dat = m.dat
self.ll = self.getFirstV("ll")
self.ur = self.getFirstV("ur")
if self.ll and self.ur:
self.c = [0.5*(self.ll[i]+self.ur[i]) for i in range(3)]
self.loadFloats(["nx","ny","nz"])
def make_centered(self):
"""Convert to symmetric centered coordinates"""
if not self.c:
return
self.ll = [self.ll[i] - self.c[i] for i in range(3)]
self.ur = [self.ur[i] - self.c[i] for i in range(3)]
self.c = [0,0,0]
class GeomInfo_File(QFile):
"""Geometry info file"""
def __init__(self,fname):
QFile.__init__(self,fname)
self.cell = cell_geom(self.getFirst("cell"))
self.cell.make_centered()
def Vol3Avg(P,ll,ur):
"Volume average of a polynomial"
return P.average(0,ll[0],ur[0]).average(0,ll[1],ur[1]).average(0,ll[2],ur[2])
def GradV(V):
"Gradient of polynomial vector"
return [Vi.derivative(a) for (a,Vi) in enumerate(V)]
axes = ["x","y","z"]
def longAxis(cell):
"""Determine longest axis of a measurement cell"""
a = [abs(cell[1][i]-cell[0][i]) for i in range(3)]
return a.index(max(a))
def linear_zerocrossings(pts):
"""Linearly interpolate positions of zero-crossings in a list of x,y points"""
pts.sort()
zcross = []
for i in range(len(pts)-1):
x1,y1 = pts[i]
x2,y2 = pts[i+1]
if y1*y2 < 0:
zcross.append( [(y2*x1-y1*x2)/(y2-y1),(y2-y1)/(x2-x1)] )
return zcross
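# Worked example (added comment): with pts = [(0, -1), (1, 1)] the sign change
# between the two points gives linear_zerocrossings(pts) == [[0.5, 2.0]],
# i.e. the interpolated crossing at x = 0.5 and the local slope (y2-y1)/(x2-x1) = 2.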
def otherAxis(xi,xj):
assert xi != xj
for i in range(3):
if i not in (xi,xj):
return i
return None
class BCell:
"""B Field in measurement cell from polynomial"""
def __init__(self,fname=None):
if fname:
fitf = open(fname,"r")
self.B = [read_polynomial(fitf) for a in range(3)]
else:
self.B = [polynomial(3) for a in range(3)]
self.ll = self.ur = ()
self.basepath = None
def m2cm(self):
"""Convert position units from m to cm"""
for Bi in self.B:
Bi.rescale([.01,.01,.01])
self.ll = [100*x for x in self.ll]
self.ur = [100*x for x in self.ur]
def avgB(self):
return [Vol3Avg(self.B[i],self.ll,self.ur) for i in range(3)]
def normB0(self,Btarg):
"""Normalize average field to desired quantity"""
B0avg = Vol3Avg(self.B[0],self.ll,self.ur)
self.B = [Bi * (Btarg/B0avg) for Bi in self.B]
return B0avg
def GradBAvg(self):
"""Volume average gradient"""
return [Vol3Avg(Bi,self.ll,self.ur) for Bi in GradV(self.B)]
def DerivsAvg(self):
"""dBi/dj average gradients"""
return [[Vol3Avg(self.B[i].derivative(j),self.ll,self.ur) for j in range(3)] for i in range(3)]
def GradBRMS(self,xi,xj):
"""Volume RMS sqrt(<(dBxi/dBxj)^2>)"""
dBxdz = self.B[xi].derivative(xj)
return sqrt(Vol3Avg(dBxdz*dBxdz,self.ll,self.ur))
def Bmag2(self):
"""Calculate magnitude of B^2 as polynomial"""
return sum([Bi*Bi for Bi in self.B])
def volume(self):
"""Cell volume"""
return abs(product([self.ur[a]-self.ll[a] for a in range(3)]))
def gridLines(self,x0,xi,xj,xk,nptsijk=(3,3,50)):
"""Bx0 along lines points on axes (xi,xj), [ xk, Bx0(x) ]"""
self.slicepts = []
dlines = {}
for (ni,p_i) in enumerate(unifrange(self.ll[xi],self.ur[xi],nptsijk[0])):
for (nj,p_j) in enumerate(unifrange(self.ll[xj],self.ur[xj],nptsijk[1])):
spoints = [ (p_i,p_j,p_k) for p_k in unifrange(self.ll[xk],self.ur[xk],nptsijk[2]) ]
xpts = [ [0,0,0] for p in spoints ]
for (n,s) in enumerate(spoints):
xpts[n][xi] = s[0]
xpts[n][xj] = s[1]
xpts[n][xk] = s[2]
p0 = (xpts[0][xi],xpts[0][xj])
self.slicepts.append( ((ni,nj), p0) )
dlines[p0] = [(x[xk],self.B[x0](x)) for x in xpts]
return dlines
def plotFields(self,x0,xi,xj,xk,nptsijk=(3,3,50)):
# Bx0 slice along fixed xi,xj, varying xk="z"
#x0,xi,xj,xk = 0,0,2,1 # B_x along y
#x0,xi,xj,xk = 0,0,1,2 # B_x along z
istyles = [[rgb.red],[rgb.green],[rgb.blue]]
jstyles = [[style.linestyle.dotted,style.linewidth.THick],[style.linestyle.solid,style.linewidth.THick],[style.linestyle.dashed,style.linewidth.Thick]]
g=graph.graphxy(width=24,height=16,
x=graph.axis.lin(title="$%s$ position [cm]"%axes[xk]),
y=graph.axis.lin(title="$B_{%s}$ [mG]"%axes[x0]),
key = graph.key.key(pos="bc",columns=3))
g.texrunner.set(lfs='foils17pt')
dlines = self.gridLines(x0,xi,xj,xk,nptsijk)
for (ni,nj),(p_i,p_j) in self.slicepts:
g.plot(graph.data.points(dlines[(p_i,p_j)],x=1,y=2,title="$%s,%s = %+.2f,%+.2f$"%(axes[xi],axes[xj],p_i,p_j)),
[graph.style.line(lineattrs=istyles[ni]+jstyles[nj]),])
g.writePDFfile(self.basepath + "/Field_B%s_%s.pdf"%(axes[x0],axes[xk]))
def plotCellProjection(self, xi, xj, nptsijk=None, B0=None, B0scale=None):
if nptsijk is None:
nptsijk = ( int(self.ur[xi]-self.ll[xi]), int(self.ur[xj]-self.ll[xj]), 5 )
# default to residuals plot
if B0 is None:
B0 = self.avgB()
if B0scale is None:
B0scale = sqrt(sum([x**2 for x in B0]))*0.005
"""Plot projection of cell fields in xi-xj plane"""
g=graph.graphxy(width=self.ur[xi]-self.ll[xi]+1, height=self.ur[xj]-self.ll[xj]+1,
x=graph.axis.lin(title="$%s$ position [cm]"%axes[xi], min=self.ll[xi]-0.5, max=self.ur[xi]+0.5),
y=graph.axis.lin(title="$%s$ position [cm]"%axes[xj], min=self.ll[xj]-0.5, max=self.ur[xj]+0.5))
g.texrunner.set(lfs='foils17pt')
AP = ArrowPlotter(g)
xk = otherAxis(xi,xj)
dati = self.gridLines(xi,xi,xj,xk,nptsijk)
datj = self.gridLines(xj,xi,xj,xk,nptsijk)
kcol = rainbow(nptsijk[2])
for nk in range(nptsijk[2]):
gdat = [ (p[1][0], p[1][1], (dati[p[1]][nk][1]-B0[xi])/B0scale, (datj[p[1]][nk][1]-B0[xj])/B0scale) for p in self.slicepts]
asty = graph.style.arrow(lineattrs=[style.linewidth.Thick,kcol[nk]])
AP.plot_arrowdat(gdat,asty,offset=True)
g.writePDFfile(self.basepath + "/Cell_%s-%s.pdf"%(axes[xi],axes[xj]))
class FieldInfo:
"""Output fields information"""
def __init__(self,basepath):
self.basepath = basepath
self.BC = BCell(basepath+"/Fieldstats.txt")
self.GIF = GeomInfo_File(basepath+"/GeomInfo.txt")
if not self.GIF.cell.c:
return
# target B0, mG
self.B0targ = 30.
self.BC.ll,self.BC.ur = self.GIF.cell.ll,self.GIF.cell.ur
self.BC.m2cm()
self.BC.basepath = basepath
self.Bavg = self.BC.normB0(self.B0targ)
class VarParamPlotter:
"""Plotter for field uniformity varying with parameter"""
def __init__(self,outdir,basename="X"):
self.axiscols = {"x":rgb.red,"y":rgb.green,"z":rgb.blue,"S":rgb.black}
self.keypos = "tl"
self.g = None
self.outdir = outdir
self.datlist = [ (float(f[len(basename):].split("_")[1]), FieldInfo(outdir+"/"+f)) for f in os.listdir(outdir) if f[:len(basename)+1]==basename+"_"]
self.datlist = [ d for d in self.datlist if d[1].GIF.cell.c ] # filter out incomplete data points
def setupGraph(self, varname, logx=False, logy2=False, xrange=(None,None), yrange = (None,None), y2range=(0,None), xtrans=(lambda x:x)):
print "Setting up graph for",varname
xaxis = graph.axis.lin(title=varname, min=xrange[0], max=xrange[1])
if logx:
xaxis = graph.axis.log(title=varname, min=xrange[0], max=xrange[1])
y2axis = graph.axis.lin(title="Dephasing rate $1/T_2$ [mHz]", min=y2range[0], max=y2range[1])
if logy2:
y2axis = graph.axis.log(title="Dephasing rate $1/T_2$ [mHz]")
self.xtrans = xtrans
self.g = graph.graphxy(width=24,height=16,
x=xaxis,
y=graph.axis.lin(title="Cell Average Gradients [$\\mu$G/cm]", min=yrange[0], max=yrange[1]),
y2=y2axis,
key = graph.key.key(pos=self.keypos,columns=2))
self.g.texrunner.set(lfs='foils17pt')
self.gT2 = graph.graphxy(width=24,height=16,
x=graph.axis.lin(title=varname),
y=graph.axis.lin(title="Dephasing rate $1/T_2$ [mHz]", min=y2range[0], max=y2range[1]),
key = graph.key.key(pos=self.keypos,columns=2))
self.gT2.texrunner.set(lfs='foils17pt')
self.g.plot(graph.data.function("y(x)=0",title=None),[graph.style.line(lineattrs=[style.linestyle.dotted,style.linewidth.Thick])])
def makePlot(self,cname="",csty=[],cell=None,PGlist=[]):
assert self.g is not None
# collect data
gdat = []
gdat_T2 = []
lax = None
for (r,FI) in self.datlist:
if cell:
FI.BC.ll,FI.BC.ur = cell[0],cell[1]
lax = longAxis((FI.BC.ll,FI.BC.ur))
#GradScaled = [x*1000 for x in FI.BC.GradBAvg()]
derivs = FI.BC.DerivsAvg()
GradScaled = [derivs[i][i]*1000 for i in range(3)]
rms = FI.BC.GradBRMS(0,lax)*1000
dBxdz = derivs[0][2]*1000
print r,"B0 =",FI.Bavg,"\tBgrad =",GradScaled,"\tRMS =",rms
gdat.append([self.xtrans(r),]+GradScaled+[rms,dBxdz])
# unscale milligauss to Gauss; calculate T2
for a in range(3):
FI.BC.B[a] *= 1e-3
gdat_T2.append([self.xtrans(r),]+[1000*abs(T2i(FI.BC,PG)) for PG in PGlist])
gdat.sort()
gdat_T2.sort()
print gdat_T2
for (n,a) in enumerate(axes):
axdat = [(p[0],p[1+n]) for p in gdat]
print "dB%s/d%s zero crossings: %s"%(a,a,cname),linear_zerocrossings(axdat)
ptsymb = graph.style.symbol(symbol.circle,size=0.2,symbolattrs=[self.axiscols[a]])
self.g.plot(graph.data.points(axdat,x=1,y=2,title="$\\langle \\partial B_{%s} / \\partial %s \\rangle$ "%(a,a)+cname),
[graph.style.line(lineattrs=[self.axiscols[a],style.linewidth.THick]+csty),ptsymb])
if PGlist:
self.g.plot(graph.data.points(gdat_T2,x=1,y2=2,title=PG.name+" $1/T_2$"),
[graph.style.line(lineattrs=[style.linewidth.THick]+csty),graph.style.symbol(symbol.circle,size=0.2)])
else:
self.g.plot(graph.data.points(gdat,x=1,y=5,title="${\\langle (\\partial B_x / \\partial %s)^2 \\rangle}^{1/2}$ "%axes[lax]+cname),
[graph.style.line(lineattrs=[style.linewidth.THick]+csty),graph.style.symbol(symbol.circle,size=0.2)])
PGsty = [graph.style.line([style.linestyle.dashed]),graph.style.line([style.linestyle.dotted])]
for (n,PG) in enumerate(PGlist):
self.gT2.plot(graph.data.points(gdat_T2,x=1,y2=2+n,title=PG.name+" $1/T_2$"),
[PGsty[n],graph.style.symbol(symbol.circle,size=0.2)])
if 0:
axdat = [(p[0],p[5]) for p in gdat]
print "dBx/dz zero crossings: %s"%(cname),linear_zerocrossings(axdat)
self.g.plot(graph.data.points(gdat,x=1,y=6,title="$\\langle \\partial B_x / \\partial z \\rangle$ "+cname),
[graph.style.line(lineattrs=[style.linewidth.thin]+csty),graph.style.symbol(symbol.circle,size=0.2)])
def outputPlot(self):
self.g.writePDFfile(self.outdir + "/FieldUniformity.pdf")
#self.gT2.writePDFfile(self.outdir + "/T2.pdf")
if __name__=="__main__":
outdir = os.environ["ROTSHIELD_OUT"]
| gpl-3.0 | 7,324,081,614,083,278,000 | 37.905045 | 159 | 0.541911 | false |
trondhindenes/ansible | lib/ansible/modules/network/aci/aci_bd_subnet.py | 15 | 13411 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_bd_subnet
short_description: Manage Subnets (fv:Subnet)
description:
- Manage Subnets on Cisco ACI fabrics.
notes:
- The C(gateway) parameter is the root key used to access the Subnet (not name), so the C(gateway)
is required when the state is C(absent) or C(present).
- The C(tenant) and C(bd) used must exist before using this module in your playbook.
The M(aci_tenant) module and M(aci_bd) can be used for these.
- More information about the internal APIC class B(fv:Subnet) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
bd:
description:
- The name of the Bridge Domain.
aliases: [ bd_name ]
description:
description:
- The description for the Subnet.
aliases: [ descr ]
enable_vip:
description:
- Determines if the Subnet should be treated as a VIP; used when the BD is extended to multiple sites.
- The APIC defaults to C(no) when unset during creation.
type: bool
gateway:
description:
- The IPv4 or IPv6 gateway address for the Subnet.
aliases: [ gateway_ip ]
mask:
description:
- The subnet mask for the Subnet.
    - This is the number associated with CIDR notation.
- For IPv4 addresses, accepted values range between C(0) and C(32).
- For IPv6 addresses, accepted Values range between C(0) and C(128).
type: int
aliases: [ subnet_mask ]
nd_prefix_policy:
description:
- The IPv6 Neighbor Discovery Prefix Policy to associate with the Subnet.
preferred:
description:
    - Determines if the Subnet is preferred over all available Subnets. Only one Subnet per Address Family (IPv4/IPv6)
      can be preferred in the Bridge Domain.
- The APIC defaults to C(no) when unset during creation.
type: bool
route_profile:
description:
    - The Route Profile to associate with the Subnet.
route_profile_l3_out:
description:
    - The L3 Out that contains the associated Route Profile.
scope:
description:
- Determines the scope of the Subnet.
- The C(private) option only allows communication with hosts in the same VRF.
- The C(public) option allows the Subnet to be advertised outside of the ACI Fabric, and allows communication with
hosts in other VRFs.
    - The C(shared) option limits communication to hosts in either the same VRF or the shared VRF.
- The value is a list of options, C(private) and C(public) are mutually exclusive, but both can be used with C(shared).
- The APIC defaults to C(private) when unset during creation.
type: list
choices:
- private
- public
- shared
subnet_control:
description:
- Determines the Subnet's Control State.
- The C(querier_ip) option is used to treat the gateway_ip as an IGMP querier source IP.
    - The C(nd_ra) option is used to treat the gateway_ip address as a Neighbor Discovery Router Advertisement Prefix.
- The C(no_gw) option is used to remove default gateway functionality from the gateway address.
- The APIC defaults to C(nd_ra) when unset during creation.
choices: [ nd_ra, no_gw, querier_ip, unspecified ]
subnet_name:
description:
- The name of the Subnet.
aliases: [ name ]
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Create a tenant
aci_tenant:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
state: present
delegate_to: localhost
- name: Create a bridge domain
aci_bd:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
state: present
delegate_to: localhost
- name: Create a subnet
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
state: present
delegate_to: localhost
- name: Create a subnet with options
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
subnet_name: sql
gateway: 10.1.2.1
mask: 23
description: SQL Servers
scope: public
route_profile_l3_out: corp
route_profile: corp_route_profile
state: present
delegate_to: localhost
- name: Update a subnets scope to private and shared
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
scope: [private, shared]
state: present
delegate_to: localhost
- name: Get all subnets
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
- name: Get all subnets of specific gateway in specified tenant
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
gateway: 10.1.1.1
mask: 24
state: query
delegate_to: localhost
register: query_result
- name: Get specific subnet
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
state: query
delegate_to: localhost
register: query_result
- name: Delete a subnet
aci_bd_subnet:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
bd: database
gateway: 10.1.1.1
mask: 24
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
SUBNET_CONTROL_MAPPING = dict(nd_ra='nd', no_gw='no-default-gateway', querier_ip='querier', unspecified='')
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
bd=dict(type='str', aliases=['bd_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
enable_vip=dict(type='bool'),
gateway=dict(type='str', aliases=['gateway_ip']), # Not required for querying all objects
mask=dict(type='int', aliases=['subnet_mask']), # Not required for querying all objects
subnet_name=dict(type='str', aliases=['name']),
nd_prefix_policy=dict(type='str'),
preferred=dict(type='bool'),
route_profile=dict(type='str'),
route_profile_l3_out=dict(type='str'),
scope=dict(type='list', choices=['private', 'public', 'shared']),
subnet_control=dict(type='str', choices=['nd_ra', 'no_gw', 'querier_ip', 'unspecified']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_together=[['gateway', 'mask']],
required_if=[
['state', 'present', ['bd', 'gateway', 'mask', 'tenant']],
['state', 'absent', ['bd', 'gateway', 'mask', 'tenant']],
],
)
aci = ACIModule(module)
description = module.params['description']
enable_vip = aci.boolean(module.params['enable_vip'])
tenant = module.params['tenant']
bd = module.params['bd']
gateway = module.params['gateway']
mask = module.params['mask']
if mask is not None and mask not in range(0, 129):
        # TODO: split checks between IPv4 and IPv6 addresses
module.fail_json(msg='Valid Subnet Masks are 0 to 32 for IPv4 Addresses and 0 to 128 for IPv6 addresses')
if gateway is not None:
gateway = '{0}/{1}'.format(gateway, str(mask))
subnet_name = module.params['subnet_name']
nd_prefix_policy = module.params['nd_prefix_policy']
preferred = aci.boolean(module.params['preferred'])
route_profile = module.params['route_profile']
route_profile_l3_out = module.params['route_profile_l3_out']
scope = module.params['scope']
if scope is not None:
if 'private' in scope and 'public' in scope:
module.fail_json(msg="Parameter 'scope' cannot be both 'private' and 'public', got: %s" % scope)
else:
scope = ','.join(sorted(scope))
state = module.params['state']
subnet_control = module.params['subnet_control']
if subnet_control:
subnet_control = SUBNET_CONTROL_MAPPING[subnet_control]
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='fvBD',
aci_rn='BD-{0}'.format(bd),
module_object=bd,
target_filter={'name': bd},
),
subclass_2=dict(
aci_class='fvSubnet',
aci_rn='subnet-[{0}]'.format(gateway),
module_object=gateway,
target_filter={'ip': gateway},
),
child_classes=['fvRsBDSubnetToProfile', 'fvRsNdPfxPol'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvSubnet',
class_config=dict(
ctrl=subnet_control,
descr=description,
ip=gateway,
name=subnet_name,
preferred=preferred,
scope=scope,
virtual=enable_vip,
),
child_configs=[
{'fvRsBDSubnetToProfile': {'attributes': {'tnL3extOutName': route_profile_l3_out, 'tnRtctrlProfileName': route_profile}}},
{'fvRsNdPfxPol': {'attributes': {'tnNdPfxPolName': nd_prefix_policy}}},
],
)
aci.get_diff(aci_class='fvSubnet')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 8,766,096,382,707,478,000 | 29.479545 | 141 | 0.624189 | false |
grilo/ansible-1 | lib/ansible/modules/system/ufw.py | 26 | 11678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ahti Kitsik <[email protected]>
# (c) 2014, Jarno Keskikangas <[email protected]>
# (c) 2013, Aleksey Ovcharenko <[email protected]>
# (c) 2013, James Martin <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- "Aleksey Ovcharenko (@ovcharenko)"
- "Jarno Keskikangas (@pyykkis)"
- "Ahti Kitsik (@ahtik)"
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
required: false
choices: ['enabled', 'disabled', 'reloaded', 'reset']
policy:
description:
- Change the default policy for incoming or outgoing traffic.
required: false
aliases: ['default']
choices: ['allow', 'deny', 'reject']
direction:
description:
- Select direction for a rule or default policy command.
required: false
choices: ['in', 'out', 'incoming', 'outgoing', 'routed']
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
choices: ['on', 'off', 'low', 'medium', 'high', 'full']
required: false
insert:
description:
- Insert the corresponding rule as rule number NUM
required: false
rule:
description:
- Add firewall rule
required: false
choices: ['allow', 'deny', 'reject', 'limit']
log:
description:
- Log new connections matched to this rule
required: false
choices: ['yes', 'no']
from_ip:
description:
- Source IP address.
required: false
aliases: ['from', 'src']
default: 'any'
from_port:
description:
- Source port.
required: false
to_ip:
description:
- Destination IP address.
required: false
aliases: ['to', 'dest']
default: 'any'
to_port:
description:
- Destination port.
required: false
aliases: ['port']
proto:
description:
- TCP/IP protocol.
choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']
required: false
name:
description:
- Use profile located in C(/etc/ufw/applications.d)
required: false
aliases: ['app']
delete:
description:
- Delete rule.
required: false
choices: ['yes', 'no']
interface:
description:
- Specify interface for rule.
required: false
aliases: ['if']
route:
description:
- Apply the rule to routed/forwarded packets.
required: false
choices: ['yes', 'no']
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
required: false
version_added: "2.4"
'''
EXAMPLES = '''
# Allow everything and enable UFW
- ufw:
state: enabled
policy: allow
# Set logging
- ufw:
logging: on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
# Delete OpenSSH rule
- ufw:
rule: allow
name: OpenSSH
delete: yes
# Deny all access to port 53:
- ufw:
rule: deny
port: 53
# Allow port range 60000-61000
- ufw:
rule: allow
port: '60000:61000'
# Allow all access to tcp port 80:
- ufw:
rule: allow
port: 80
proto: tcp
# Allow all access from RFC1918 networks to this host:
- ufw:
rule: allow
src: '{{ item }}'
with_items:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
# Deny access to udp port 514 from host 1.2.3.4 and include a comment:
- ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: 514
comment: "Block syslog"
# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
- ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: 5469
dest: 1.2.3.4
to_port: 5469
# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- ufw:
rule: deny
proto: tcp
src: '2001:db8::/32'
port: 25
# Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24.
# Can be used to further restrict a global FORWARD policy set to allow
- ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
import re
from operator import itemgetter
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete = dict(default=False, type='bool'),
route = dict(default=False, type='bool'),
insert = dict(default=None),
rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
interface = dict(default=None, aliases=['if']),
log = dict(default=False, type='bool'),
from_ip = dict(default='any', aliases=['src', 'from']),
from_port = dict(default=None),
to_ip = dict(default='any', aliases=['dest', 'to']),
to_port = dict(default=None, aliases=['port']),
proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
app = dict(default=None, aliases=['name']),
comment = dict(default=None, type='str')
),
supports_check_mode = True,
mutually_exclusive = [['app', 'proto', 'logging']]
)
cmds = []
def execute(cmd):
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd)
if rc != 0:
module.fail_json(msg=err or out)
def ufw_version():
"""
        Returns the major, minor and revision numbers of the ufw version installed on the system.
"""
rc, out, err = module.run_command("%s --version" % ufw_bin)
if rc != 0:
module.fail_json(
msg="Failed to get ufw version.", rc=rc, out=out, err=err
)
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
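    # Illustrative only (not part of the original module): for output such as
    # "ufw 0.35", ufw_version() above returns (0, 35, 0).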
params = module.params
# Ensure at least one of the command arguments are given
command_keys = ['state', 'default', 'rule', 'logging']
commands = dict((key, params[key]) for key in command_keys if params[key])
if len(commands) < 1:
module.fail_json(msg="Not any of the command arguments %s given" % commands)
if(params['interface'] is not None and params['direction'] is None):
module.fail_json(msg="Direction must be specified when creating a rule on an interface")
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
# Save the pre state and rules in order to recognize changes
(_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
(_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
# Execute commands
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = { 'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset' }
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
execute(cmd + [[command], [value]])
elif command == 'default':
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
# Rules are constructed according to the long format
#
# ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
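            # Illustrative only (not part of the original module): a task with
            # rule=allow, direction=in, interface=eth0, from_ip=1.2.3.4 and
            # to_port=22 would assemble roughly:
            #   ufw allow in on eth0 from 1.2.3.4 to any port 22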
cmd.append([module.boolean(params['delete']), 'delete'])
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([params['insert'], "insert %s" % params['insert']])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s" ), ('from_port', "port %s" ),
('to_ip', "to %s" ), ('to_port', "port %s" ),
('proto', "proto %s"), ('app', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version 0.35 and later
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
execute(cmd)
# Get the new state
(_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
(_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
if __name__ == '__main__':
main()
| gpl-3.0 | 3,835,772,466,421,743,600 | 30.647696 | 146 | 0.587601 | false |
python-ivi/python-ivi | ivi/agilent/agilentMSOX3032A.py | 2 | 1695 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentMSOX3032A(agilent3000A):
"Agilent InfiniiVision MSOX3032A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO-X 3032A')
super(agilentMSOX3032A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 350e6
self._init_channels()
| mit | -6,053,007,314,918,584,000 | 37.522727 | 86 | 0.736873 | false |
tpltnt/scapy | scapy/modules/voip.py | 17 | 4095 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
VoIP (Voice over IP) related functions
"""
import os
###################
## Testing stuff ##
###################
from fcntl import fcntl
from scapy.sendrecv import sniff
from scapy.layers.inet import IP,UDP
from scapy.layers.rtp import RTP
from scapy.utils import get_temp_file
from scapy.config import conf
def merge(x,y,sample_size=2):
if len(x) > len(y):
y += "\x00"*(len(x)-len(y))
elif len(x) < len(y):
x += "\x00"*(len(y)-len(x))
m = ""
ss=sample_size
for i in range(len(x)/ss):
m += x[ss*i:ss*(i+1)]+y[ss*i:ss*(i+1)]
return m
# return "".join(map(str.__add__, x, y))
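# Illustrative only (not from the original source): with the default
# sample_size of 2, merge("AABB", "aabb") interleaves two-byte samples and
# returns "AAaaBBbb", i.e. the two mono streams woven into one stereo stream.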
def voip_play(s1,list=None,**kargs):
FIFO=get_temp_file()
FIFO1=FIFO % 1
FIFO2=FIFO % 2
os.mkfifo(FIFO1)
os.mkfifo(FIFO2)
try:
os.system("soxmix -t .ul %s -t .ul %s -t ossdsp /dev/dsp &" % (FIFO1,FIFO2))
c1=open(FIFO1,"w", 4096)
c2=open(FIFO2,"w", 4096)
fcntl.fcntl(c1.fileno(),fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(c2.fileno(),fcntl.F_SETFL, os.O_NONBLOCK)
# dsp,rd = os.popen2("sox -t .ul -c 2 - -t ossdsp /dev/dsp")
def play(pkt,last=[]):
if not pkt:
return
if not pkt.haslayer(UDP):
return
ip=pkt.getlayer(IP)
if s1 in [ip.src, ip.dst]:
if not last:
last.append(pkt)
return
load=last.pop()
# x1 = load.load[12:]
c1.write(load.load[12:])
if load.getlayer(IP).src == ip.src:
# x2 = ""
c2.write("\x00"*len(load.load[12:]))
last.append(pkt)
else:
# x2 = pkt.load[:12]
c2.write(pkt.load[12:])
# dsp.write(merge(x1,x2))
if list is None:
sniff(store=0, prn=play, **kargs)
else:
for p in list:
play(p)
finally:
os.unlink(FIFO1)
os.unlink(FIFO2)
def voip_play1(s1,list=None,**kargs):
dsp,rd = os.popen2("sox -t .ul - -t ossdsp /dev/dsp")
def play(pkt):
if not pkt:
return
if not pkt.haslayer(UDP):
return
ip=pkt.getlayer(IP)
if s1 in [ip.src, ip.dst]:
dsp.write(pkt.getlayer(conf.raw_layer).load[12:])
try:
if list is None:
sniff(store=0, prn=play, **kargs)
else:
for p in list:
play(p)
finally:
dsp.close()
rd.close()
def voip_play2(s1,**kargs):
dsp,rd = os.popen2("sox -t .ul -c 2 - -t ossdsp /dev/dsp")
def play(pkt,last=[]):
if not pkt:
return
if not pkt.haslayer(UDP):
return
ip=pkt.getlayer(IP)
if s1 in [ip.src, ip.dst]:
if not last:
last.append(pkt)
return
load=last.pop()
x1 = load.load[12:]
# c1.write(load.load[12:])
if load.getlayer(IP).src == ip.src:
x2 = ""
# c2.write("\x00"*len(load.load[12:]))
last.append(pkt)
else:
                x2 = pkt.load[12:]
# c2.write(pkt.load[12:])
dsp.write(merge(x1,x2))
sniff(store=0, prn=play, **kargs)
def voip_play3(lst=None,**kargs):
dsp,rd = os.popen2("sox -t .ul - -t ossdsp /dev/dsp")
try:
def play(pkt, dsp=dsp):
if pkt and pkt.haslayer(UDP) and pkt.haslayer(conf.raw_layer):
dsp.write(pkt.getlayer(RTP).load)
if lst is None:
sniff(store=0, prn=play, **kargs)
else:
for p in lst:
play(p)
finally:
try:
dsp.close()
rd.close()
except:
pass
| gpl-2.0 | 3,752,407,651,174,064,600 | 26.483221 | 84 | 0.473016 | false |
h3llrais3r/Auto-Subliminal | lib/websocket/_core.py | 3 | 17905 | """
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import socket
import struct
import threading
import time
import six
# websocket modules
from ._abnf import *
from ._exceptions import *
from ._handshake import *
from ._http import *
from ._logging import *
from ._socket import *
from ._ssl_compat import *
from ._utils import *
__all__ = ['WebSocket', 'create_connection']
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False, **_):
"""
Initialize WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
# These buffer over the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(
fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
self.readlock = threading.Lock()
else:
self.lock = NoLock()
self.readlock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
        Set the function used to create mask keys. You can customize the mask
        key generator. Mainly, this is for testing purposes.
        func: callable object. The function takes one integer argument, the
        length of the mask key, and must return a string (byte array) of that
        length.
"""
self.get_mask_key = func
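        # Illustrative only (not part of the original library): a caller could
        # supply e.g. ws.set_mask_key(lambda length: os.urandom(length)), since
        # os.urandom returns a byte string of the requested length.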
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
def is_ssl(self):
return isinstance(self.sock, ssl.SSLSocket)
headers = property(getheaders)
def connect(self, url, **options):
"""
        Connect to url. url is a websocket url scheme,
        i.e. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"suppress_origin" -> suppress outputting origin header.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"redirect_limit" -> number of redirects to follow.
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
# FIXME: "subprotocols" are getting lost, not passed down
# FIXME: "header", "cookie", "origin" and "host" too
self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
for attempt in range(options.pop('redirect_limit', 3)):
if self.handshake_response.status in SUPPORTED_REDIRECT_STATUSES:
url = self.handshake_response.headers['location']
self.sock.close()
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
with self.readlock:
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException(
"Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return frame.opcode, frame
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException(
"Ping message is too long")
if control_frame:
return frame.opcode, frame
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return frame.opcode, frame
def recv_frame(self):
"""
receive data as frame from server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
timeout: timeout until receive a close frame.
If None, it will wait forever until receive a close frame.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) +
reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
start_time = time.time()
while timeout is None or time.time() - start_time < timeout:
try:
frame = self.recv_frame()
if frame.opcode != ABNF.OPCODE_CLOSE:
continue
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data[0:2])[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
break
except:
break
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"""close socket, immediately."""
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
def create_connection(url, timeout=None, class_=WebSocket, **options):
"""
connect to url and return websocket object.
Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
    the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
class_: class to instantiate when creating the connection. It has to implement
            settimeout and connect. Its __init__ should be compatible with
            WebSocket.__init__, i.e. accept all of its kwargs.
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"suppress_origin" -> suppress outputting origin header.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"enable_multithread" -> enable lock for multithread.
"redirect_limit" -> number of redirects to follow.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols.
default is None.
"skip_utf8_validation" - skip utf8 validation.
"socket" - pre-initialized stream socket.
"""
sockopt = options.pop("sockopt", [])
sslopt = options.pop("sslopt", {})
fire_cont_frame = options.pop("fire_cont_frame", False)
enable_multithread = options.pop("enable_multithread", False)
skip_utf8_validation = options.pop("skip_utf8_validation", False)
websock = class_(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=fire_cont_frame,
enable_multithread=enable_multithread,
skip_utf8_validation=skip_utf8_validation, **options)
websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
websock.connect(url, **options)
return websock
| gpl-3.0 | -2,819,112,851,762,709,000 | 33.76699 | 90 | 0.568277 | false |
pymedusa/Medusa | medusa/helper/metadata.py | 3 | 1464 | # coding=utf-8
from __future__ import unicode_literals
import logging
from builtins import str
from medusa import app
from medusa.logger.adapters.style import BraceAdapter
from medusa.session.core import MedusaSafeSession
from six import itervalues
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
meta_session = MedusaSafeSession()
def get_image(url, img_no=None):
if url is None:
return None
# if they provided a fanart number try to use it instead
if img_no is not None:
temp_url = url.split('-')[0] + '-' + str(img_no) + '.jpg'
else:
temp_url = url
log.debug(u'Fetching image from {url}', {'url': temp_url})
# TODO: SESSION: Check if this needs exception handling.
image_data = meta_session.get(temp_url)
if not image_data:
log.warning(u'There was an error trying to retrieve the image, aborting')
return
return image_data.content
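# Illustrative only (not part of the original module): because get_image()
# rebuilds the URL as url.split('-')[0] + '-' + str(img_no) + '.jpg', calling
# get_image('https://example.com/fanart-1.jpg', img_no=3) would fetch
# 'https://example.com/fanart-3.jpg' (the URL here is hypothetical).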
def needs_metadata(episode):
"""Check if an episode needs metadata.
:param episode: Episode object.
:return: True if needed, None otherwise
"""
if not episode.is_location_valid():
return
for provider in itervalues(app.metadata_provider_dict):
if provider.episode_metadata and not provider.has_episode_metadata(episode):
return True
if provider.episode_thumbnails and not provider.has_episode_thumb(episode):
return True
| gpl-3.0 | 331,737,201,612,104,400 | 25.618182 | 84 | 0.68306 | false |
caesar2164/edx-platform | common/djangoapps/xblock_django/tests/test_api.py | 16 | 6314 | """
Tests related to XBlock support API.
"""
from xblock_django.models import XBlockConfiguration, XBlockStudioConfiguration, XBlockStudioConfigurationFlag
from xblock_django.api import deprecated_xblocks, disabled_xblocks, authorable_xblocks
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
class XBlockSupportTestCase(CacheIsolationTestCase):
"""
Tests for XBlock Support methods.
"""
def setUp(self):
super(XBlockSupportTestCase, self).setUp()
# Set up XBlockConfigurations for disabled and deprecated states
block_config = [
("poll", True, True),
("survey", False, True),
("done", True, False),
]
for name, enabled, deprecated in block_config:
XBlockConfiguration(name=name, enabled=enabled, deprecated=deprecated).save()
# Set up XBlockStudioConfigurations for studio support level
studio_block_config = [
("poll", "", False, XBlockStudioConfiguration.FULL_SUPPORT), # FULL_SUPPORT negated by enabled=False
("survey", "", True, XBlockStudioConfiguration.UNSUPPORTED),
("done", "", True, XBlockStudioConfiguration.FULL_SUPPORT),
("problem", "", True, XBlockStudioConfiguration.FULL_SUPPORT),
("problem", "multiple_choice", True, XBlockStudioConfiguration.FULL_SUPPORT),
("problem", "circuit_schematic_builder", True, XBlockStudioConfiguration.UNSUPPORTED),
("problem", "ora1", False, XBlockStudioConfiguration.FULL_SUPPORT),
("html", "zoom", True, XBlockStudioConfiguration.PROVISIONAL_SUPPORT),
("split_module", "", True, XBlockStudioConfiguration.UNSUPPORTED),
]
for name, template, enabled, support_level in studio_block_config:
XBlockStudioConfiguration(name=name, template=template, enabled=enabled, support_level=support_level).save()
def test_deprecated_blocks(self):
""" Tests the deprecated_xblocks method """
deprecated_xblock_names = [block.name for block in deprecated_xblocks()]
self.assertItemsEqual(["poll", "survey"], deprecated_xblock_names)
XBlockConfiguration(name="poll", enabled=True, deprecated=False).save()
deprecated_xblock_names = [block.name for block in deprecated_xblocks()]
self.assertItemsEqual(["survey"], deprecated_xblock_names)
def test_disabled_blocks(self):
""" Tests the disabled_xblocks method """
disabled_xblock_names = [block.name for block in disabled_xblocks()]
self.assertItemsEqual(["survey"], disabled_xblock_names)
XBlockConfiguration(name="poll", enabled=False, deprecated=True).save()
disabled_xblock_names = [block.name for block in disabled_xblocks()]
self.assertItemsEqual(["survey", "poll"], disabled_xblock_names)
def test_authorable_blocks_empty_model(self):
"""
        Tests that authorable_xblocks returns an empty list if the XBlockStudioConfiguration table is empty, regardless
of whether or not XBlockStudioConfigurationFlag is enabled.
"""
XBlockStudioConfiguration.objects.all().delete()
self.assertFalse(XBlockStudioConfigurationFlag.is_enabled())
self.assertEqual(0, len(authorable_xblocks(allow_unsupported=True)))
XBlockStudioConfigurationFlag(enabled=True).save()
self.assertEqual(0, len(authorable_xblocks(allow_unsupported=True)))
def test_authorable_blocks(self):
"""
Tests authorable_xblocks when name is not specified.
"""
authorable_xblock_names = [block.name for block in authorable_xblocks()]
self.assertItemsEqual(["done", "problem", "problem", "html"], authorable_xblock_names)
# Note that "survey" is disabled in XBlockConfiguration, but it is still returned by
# authorable_xblocks because it is marked as enabled and unsupported in XBlockStudioConfiguration.
# Since XBlockConfiguration is a blacklist and relates to xblock type, while XBlockStudioConfiguration
# is a whitelist and uses a combination of xblock type and template (and in addition has a global feature flag),
# it is expected that Studio code will need to filter by both disabled_xblocks and authorable_xblocks.
authorable_xblock_names = [block.name for block in authorable_xblocks(allow_unsupported=True)]
self.assertItemsEqual(
["survey", "done", "problem", "problem", "problem", "html", "split_module"],
authorable_xblock_names
)
def test_authorable_blocks_by_name(self):
"""
Tests authorable_xblocks when name is specified.
"""
def verify_xblock_fields(name, template, support_level, block):
"""
Verifies the returned xblock state.
"""
self.assertEqual(name, block.name)
self.assertEqual(template, block.template)
self.assertEqual(support_level, block.support_level)
# There are no xblocks with name video.
authorable_blocks = authorable_xblocks(name="video")
self.assertEqual(0, len(authorable_blocks))
# There is only a single html xblock.
authorable_blocks = authorable_xblocks(name="html")
self.assertEqual(1, len(authorable_blocks))
verify_xblock_fields("html", "zoom", XBlockStudioConfiguration.PROVISIONAL_SUPPORT, authorable_blocks[0])
authorable_blocks = authorable_xblocks(name="problem", allow_unsupported=True)
self.assertEqual(3, len(authorable_blocks))
no_template = None
circuit = None
multiple_choice = None
for block in authorable_blocks:
if block.template == '':
no_template = block
elif block.template == 'circuit_schematic_builder':
circuit = block
elif block.template == 'multiple_choice':
multiple_choice = block
verify_xblock_fields("problem", "", XBlockStudioConfiguration.FULL_SUPPORT, no_template)
verify_xblock_fields("problem", "circuit_schematic_builder", XBlockStudioConfiguration.UNSUPPORTED, circuit)
verify_xblock_fields("problem", "multiple_choice", XBlockStudioConfiguration.FULL_SUPPORT, multiple_choice)
| agpl-3.0 | -1,137,003,699,196,406,500 | 47.945736 | 120 | 0.674216 | false |
chintak/scikit-image | skimage/novice/__init__.py | 5 | 1796 | """
skimage.novice
==============
A special Python image submodule for beginners.
Description
-----------
``skimage.novice`` provides a simple image manipulation interface for
beginners. It allows for easy loading, manipulating, and saving of image
files.
This module is primarily intended for teaching and differs significantly from
the normal, array-oriented image functions used by scikit-image.
.. note::
This module uses the Cartesian coordinate system, where the origin is at
    the lower-left corner instead of the upper-left and the order is x, y
instead of row, column.
Example
-------
We can create a Picture object by opening an image file
>>> from skimage import novice
>>> from skimage import data
>>> picture = novice.open(data.data_dir + '/chelsea.png')
Pictures know their format
>>> picture.format
'png'
... and where they came from
>>> picture.path.endswith('chelsea.png')
True
... and their size
>>> picture.size
(451, 300)
>>> picture.width
451
Changing `size` resizes the picture.
>>> picture.size = (45, 30)
You can iterate over pixels, which have RGB values between 0 and 255,
and know their location in the picture.
>>> for pixel in picture:
... if (pixel.red > 128) and (pixel.x < picture.width):
... pixel.red /= 2
Pictures know if they've been modified from the original file
>>> picture.modified
True
>>> print(picture.path)
None
Pictures can be indexed like arrays
>>> picture[0:20, 0:20] = (0, 0, 0)
Saving the picture updates the path attribute, format, and modified state.
>>> picture.save('save-demo.jpg')
>>> picture.path.endswith('save-demo.jpg')
True
>>> picture.format
'jpeg'
>>> picture.modified
False
"""
from ._novice import Picture, open, colors, color_dict
__all__ = ['Picture', 'open', 'colors', 'color_dict']
| bsd-3-clause | -8,614,276,178,129,870,000 | 22.946667 | 77 | 0.7049 | false |
ankurankan/scikit-learn | sklearn/metrics/ranking.py | 4 | 23114 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
@deprecated("Function 'auc_score' has been renamed to "
"'roc_auc_score' and will be removed in release 0.16.")
def auc_score(y_true, y_score):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> auc_score(y_true, y_scores)
0.75
"""
return roc_auc_score(y_true, y_score)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
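# Illustrative only (not a doctest from the original source): for
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], _binary_clf_curve
# returns fps = [0, 1, 1, 2], tps = [1, 1, 2, 2] and
# thresholds = [0.8, 0.4, 0.35, 0.1].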
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator"
and not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += np.divide(L, rank, dtype=float).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average the number
of labels in ``y_true` per sample.
Ties in `y_scores` are broken by giving maximal rank that would have
been assigned to all tied values.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
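# Illustrative only (not a doctest from the original source): for
# y_true = [[1, 0, 0], [0, 1, 1]] and y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]],
# coverage_error returns (2 + 3) / 2 = 2.5, since covering the single relevant
# label of the first sample needs the top 2 scores and covering both relevant
# labels of the second sample needs all 3.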
| bsd-3-clause | 5,454,166,240,634,071,000 | 33.344725 | 79 | 0.621139 | false |
yestech/gae-django-template | djangoappengine/tests/field_options.py | 23 | 3783 | from django.test import TestCase
from django.db.utils import DatabaseError
from django.db.models.fields import NOT_PROVIDED
from .testmodels import FieldsWithOptionsModel
from google.appengine.api.datastore import Get
from google.appengine.ext.db import Key
from google.appengine.api.datastore_types import Text, Category, Email, Link, \
PhoneNumber, PostalAddress, Text, Blob, ByteString, GeoPt, IM, Key, \
Rating, BlobKey
from google.appengine.api import users
import datetime
class FieldOptionsTest(TestCase):
def test_options(self):
entity = FieldsWithOptionsModel()
# try to save the entity with non-nullable field time set to None, should
# raise an exception
self.assertRaises(DatabaseError, entity.save)
time = datetime.datetime.now().time()
entity.time = time
entity.save()
# check if primary_key=True is set correctly for the saved entity
self.assertEquals(entity.pk, u'[email protected]')
gae_entity = Get(Key.from_path(FieldsWithOptionsModel._meta.db_table,
entity.pk))
self.assertTrue(gae_entity is not None)
self.assertEquals(gae_entity.key().name(), u'[email protected]')
# check if default values are set correctly on the db level,
# primary_key field is not stored at the db level
for field in FieldsWithOptionsModel._meta.local_fields:
if field.default and field.default != NOT_PROVIDED and not \
field.primary_key:
self.assertEquals(gae_entity[field.column], field.default)
elif field.column == 'time':
self.assertEquals(gae_entity[field.column], datetime.datetime(
1970, 1, 1, time.hour, time.minute, time.second, time.microsecond))
elif field.null and field.editable:
self.assertEquals(gae_entity[field.column], None)
        # check if default values are set correctly on the model instance level
entity = FieldsWithOptionsModel.objects.get()
for field in FieldsWithOptionsModel._meta.local_fields:
if field.default and field.default != NOT_PROVIDED:
self.assertEquals(getattr(entity, field.column), field.default)
elif field.column == 'time':
self.assertEquals(getattr(entity, field.column), time)
elif field.null and field.editable:
self.assertEquals(getattr(entity, field.column), None)
# check if nullable field with default values can be set to None
entity.slug = None
entity.positiv_small_integer = None
try:
entity.save()
except:
self.fail()
# check if slug and positiv_small_integer will be retrieved with values
# set to None (on db level and model instance level)
gae_entity = Get(Key.from_path(FieldsWithOptionsModel._meta.db_table,
entity.pk))
self.assertEquals(gae_entity[FieldsWithOptionsModel._meta.get_field_by_name(
'slug')[0].column], None)
self.assertEquals(gae_entity[FieldsWithOptionsModel._meta.get_field_by_name(
'positiv_small_integer')[0].column], None)
# on the model instance level
entity = FieldsWithOptionsModel.objects.get()
self.assertEquals(getattr(entity, FieldsWithOptionsModel._meta.get_field_by_name(
'slug')[0].column), None)
self.assertEquals(getattr(entity, FieldsWithOptionsModel._meta.get_field_by_name(
'positiv_small_integer')[0].column), None)
# TODO: check db_column option
# TODO: change the primary key and check if a new instance with the
# changed primary key will be saved (not in this test class)
| bsd-3-clause | -5,313,627,065,416,167,000 | 46.886076 | 89 | 0.662437 | false |
smarkets/hal | plugins/youtube.py | 1 | 1539 | """
This plugin requires the following environment variable to be set:
* YOUTUBE_API_KEY - YouTube V3 API application key
"""
import os
import random
import requests
__commands__ = '''
hal (youtube|yt) [me] <query> - shows link to random top video matching query on YouTube
'''
def plugin(bot):
@bot.respond(r'(youtube|yt)( me)? (.+)')
def youtube(response):
query = response.match.group(3)
try:
api_key = os.environ['YOUTUBE_API_KEY']
except KeyError:
response.send('YOUTUBE_API_KEY not provided')
return
result = requests.get(
'https://www.googleapis.com/youtube/v3/search',
params={
'order': 'relevance',
'q': query,
'part': 'id',
'key': api_key,
},
)
if result.status_code != 200:
response.send('Query failed with HTTP %s' % (result.status_code,))
return
video_urls = get_urls_from_youtube_response(result.json())
if video_urls:
response.send(random.choice(video_urls))
else:
            response.send('Sorry, %s not found on YouTube' % (query,))
def get_urls_from_youtube_response(response):
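    # YouTube v3 search items nest their id as {'kind': 'youtube#video', 'videoId': ...};
    # channel and playlist results carry no 'videoId' and are filtered out below.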
items = response['items']
item_ids = [i['id'] for i in items]
video_item_ids = [i for i in item_ids if i['kind'] == 'youtube#video']
video_ids = [i['videoId'] for i in video_item_ids]
return ['https://youtube.com/watch?v=%s' % vid for vid in video_ids]
| mit | 6,235,945,572,054,341,000 | 27.5 | 92 | 0.565952 | false |
haveagr8day/AptMedium | tests/test_0001-init.py | 1 | 1348 | from .shared_test_code import init_cwd, init_non_cwd
from apt_medium.apt_medium import load_medium_state
import os
import pytest
import socket
def verify(initDir):
os.chdir(initDir)
hostname = socket.gethostname()
assert os.path.isdir(os.path.join('archives', 'partial'))
assert os.path.isdir(os.path.join('lists', 'partial'))
assert os.path.isfile('medium_state')
assert os.path.isfile(os.path.join('system_info', hostname, 'dpkg-status'))
assert os.path.isfile(os.path.join('system_info', hostname, 'etc', 'apt', 'apt.conf'))
assert os.path.isdir(os.path.join('system_info', hostname, 'etc', 'apt', 'apt.conf.d'))
assert os.path.isfile(os.path.join('system_info', hostname, 'etc', 'apt', 'apt-medium.conf'))
assert os.path.isdir(os.path.join('var', 'log', 'apt'))
state = load_medium_state()
assert 'download_queue' in state
assert 'install_queue' in state
assert hostname in state['download_queue']
assert hostname in state['install_queue']
assert state['download_queue'][hostname] == []
assert state['install_queue'][hostname] == []
def test_init_cwd():
with init_cwd() as (retCode, initDir):
assert retCode == 0
verify(initDir)
def test_init_non_cwd():
with init_non_cwd() as (retCode, initDir):
assert retCode == 0
verify(initDir)
| gpl-2.0 | 8,735,521,876,838,718,000 | 38.647059 | 97 | 0.666914 | false |
xaviercobain88/framework-python | build/lib.linux-i686-2.7/openerp/addons/mrp/mrp.py | 3 | 56286 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp import SUPERUSER_ID
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
_name = 'mrp.workcenter'
_description = 'Work Center'
_inherits = {'resource.resource':"resource_id"}
_columns = {
'note': fields.text('Description', help="Description of the Work Center. Explain here what's a cycle according to this Work Center."),
'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]),
'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True),
'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
}
_defaults = {
'capacity_per_cycle': 1.0,
'resource_type': 'material',
}
def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
value = {}
if product_id:
cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'costs_hour': cost.standard_price}
return {'value': value}
mrp_workcenter()
class mrp_routing(osv.osv):
"""
For specifying the routings of Work Centers.
"""
_name = 'mrp.routing'
_description = 'Routing'
_columns = {
'name': fields.char('Name', size=64, required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
'code': fields.char('Code', size=8),
'note': fields.text('Description'),
'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers'),
'location_id': fields.many2one('stock.location', 'Production Location',
help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations."
),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'active': lambda *a: 1,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
}
mrp_routing()
class mrp_routing_workcenter(osv.osv):
"""
Defines working cycles and hours of a Work Center using routings.
"""
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence'
_columns = {
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'name': fields.char('Name', size=64, required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
'cycle_nbr': fields.float('Number of Cycles', required=True,
help="Number of iterations this work center has to do in the specified operation of the routing."),
'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
help="Routing indicates all the Work Centers used, for how long and/or cycles." \
"If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
'note': fields.text('Description'),
'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'cycle_nbr': lambda *a: 1.0,
'hour_nbr': lambda *a: 0.0,
}
mrp_routing_workcenter()
class mrp_bom(osv.osv):
"""
Defines bills of material for a product.
"""
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit = ['mail.thread']
def _child_compute(self, cr, uid, ids, name, arg, context=None):
""" Gets child bom.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param name: Name of the field
@param arg: User defined argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values
"""
result = {}
if context is None:
context = {}
bom_obj = self.pool.get('mrp.bom')
bom_id = context and context.get('active_id', False) or False
cr.execute('select id from mrp_bom')
if all(bom_id != r[0] for r in cr.fetchall()):
ids.sort()
bom_id = ids[0]
bom_parent = bom_obj.browse(cr, uid, bom_id, context=context)
for bom in self.browse(cr, uid, ids, context=context):
if (bom_parent) or (bom.id == bom_id):
result[bom.id] = map(lambda x: x.id, bom.bom_lines)
else:
result[bom.id] = []
if bom.bom_lines:
continue
ok = ((name=='child_complete_ids') and (bom.product_id.supply_method=='produce'))
if (bom.type=='phantom' or ok):
sids = bom_obj.search(cr, uid, [('bom_id','=',False),('product_id','=',bom.product_id.id)])
if sids:
bom2 = bom_obj.browse(cr, uid, sids[0], context=context)
result[bom.id] += map(lambda x: x.id, bom2.bom_lines)
return result
def _compute_type(self, cr, uid, ids, field_name, arg, context=None):
""" Sets particular method for the selected bom type.
@param field_name: Name of the field
@param arg: User defined argument
@return: Dictionary of values
"""
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.type == 'phantom' and not line.bom_id:
res[line.id] = 'set'
continue
if line.bom_lines or line.type == 'phantom':
continue
if line.product_id.supply_method == 'produce':
if line.product_id.procure_method == 'make_to_stock':
res[line.id] = 'stock'
else:
res[line.id] = 'order'
return res
_columns = {
'name': fields.char('Name', size=64),
'code': fields.char('Reference', size=16),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
'type': fields.selection([('normal','Normal BoM'),('phantom','Sets / Phantom')], 'BoM Type', required=True,
help= "If a by-product is used in several products, it can be useful to create its own BoM. "\
"Though if you don't want separated production orders for this by-product, select Set/Phantom as BoM type. "\
"If a Phantom BoM is used for a root product, it will be sold and shipped as a set of components, instead of being produced."),
'method': fields.function(_compute_type, string='Method', type='selection', selection=[('',''),('stock','On Stock'),('order','On Order'),('set','Set / Pack')]),
'date_start': fields.date('Valid From', help="Validity of this BoM or component. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of this BoM or component. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
'position': fields.char('Internal Reference', size=64, help="Reference to a position in an external plan."),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_uos_qty': fields.float('Product UOS Qty'),
'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
'bom_lines': fields.one2many('mrp.bom', 'bom_id', 'BoM Lines'),
'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'property_ids': fields.many2many('mrp.property', 'mrp_bom_property_rel', 'bom_id','property_id', 'Properties'),
'child_complete_ids': fields.function(_child_compute, relation='mrp.bom', string="BoM Hierarchy", type='many2many'),
'company_id': fields.many2one('res.company','Company',required=True),
}
_defaults = {
'active': lambda *a: 1,
'product_efficiency': lambda *a: 1.0,
'product_qty': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'type': lambda *a: 'normal',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
}
_order = "sequence"
_parent_name = "bom_id"
_sql_constraints = [
('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
]
def _check_recursion(self, cr, uid, ids, context=None):
level = 100
while len(ids):
cr.execute('select distinct bom_id from mrp_bom where id IN %s',(tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
def _check_product(self, cr, uid, ids, context=None):
all_prod = []
boms = self.browse(cr, uid, ids, context=context)
def check_bom(boms):
res = True
for bom in boms:
if bom.product_id.id in all_prod:
res = res and False
all_prod.append(bom.product_id.id)
lines = bom.bom_lines
if lines:
res = res and check_bom([bom_id for bom_id in lines if bom_id not in boms])
return res
return check_bom(boms)
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive BoM.', ['parent_id']),
(_check_product, 'BoM line product should not be same as BoM product.', ['product_id']),
]
def onchange_product_id(self, cr, uid, ids, product_id, name, context=None):
""" Changes UoM and name if product_id changes.
@param name: Name of the field
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
return {'value': {'name': prod.name, 'product_uom': prod.uom_id.id}}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def _bom_find(self, cr, uid, product_id, product_uom, properties=None):
""" Finds BoM for particular product and product uom.
@param product_id: Selected product.
@param product_uom: Unit of measure of a product.
@param properties: List of related properties.
@return: False or BoM id.
"""
if properties is None:
properties = []
cr.execute('select id from mrp_bom where product_id=%s and bom_id is null order by sequence', (product_id,))
ids = map(lambda x: x[0], cr.fetchall())
max_prop = 0
result = False
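        # Keep the BoM matching the most of the requested properties; if none match,
        # fall back to the first BoM in sequence order.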
for bom in self.pool.get('mrp.bom').browse(cr, uid, ids):
prop = 0
for prop_id in bom.property_ids:
if prop_id.id in properties:
prop += 1
if (prop > max_prop) or ((max_prop == 0) and not result):
result = bom.id
max_prop = prop
return result
def _bom_explode(self, cr, uid, bom, factor, properties=None, addthis=False, level=0, routing_id=False):
""" Finds Products and Work Centers for related BoM for manufacturing order.
@param bom: BoM of particular product.
@param factor: Factor of product UoM.
@param properties: A List of properties Ids.
@param addthis: If BoM found then True else False.
@param level: Depth level to find BoM lines starts from 10.
@return: result: List of dictionaries containing product details.
result2: List of dictionaries containing Work Center details.
"""
routing_obj = self.pool.get('mrp.routing')
factor = factor / (bom.product_efficiency or 1.0)
factor = rounding(factor, bom.product_rounding)
if factor < bom.product_rounding:
factor = bom.product_rounding
result = []
result2 = []
phantom = False
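        # A phantom BoM without its own lines is replaced by the BoM found for its
        # product, exploded with the quantity scaled by this line's quantity.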
if bom.type == 'phantom' and not bom.bom_lines:
newbom = self._bom_find(cr, uid, bom.product_id.id, bom.product_uom.id, properties)
if newbom:
res = self._bom_explode(cr, uid, self.browse(cr, uid, [newbom])[0], factor*bom.product_qty, properties, addthis=True, level=level+10)
result = result + res[0]
result2 = result2 + res[1]
phantom = True
else:
phantom = False
if not phantom:
if addthis and not bom.bom_lines:
result.append(
{
'name': bom.product_id.name,
'product_id': bom.product_id.id,
'product_qty': bom.product_qty * factor,
'product_uom': bom.product_uom.id,
'product_uos_qty': bom.product_uos and bom.product_uos_qty * factor or False,
'product_uos': bom.product_uos and bom.product_uos.id or False,
})
routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
if routing:
for wc_use in routing.workcenter_lines:
wc = wc_use.workcenter_id
d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
mult = (d + (m and 1.0 or 0.0))
cycle = mult * wc_use.cycle_nbr
result2.append({
'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_id.name),
'workcenter_id': wc.id,
'sequence': level+(wc_use.sequence or 0),
'cycle': cycle,
'hour': float(wc_use.hour_nbr*mult + ((wc.time_start or 0.0)+(wc.time_stop or 0.0)+cycle*(wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
})
for bom2 in bom.bom_lines:
res = self._bom_explode(cr, uid, bom2, factor, properties, addthis=True, level=level+10)
result = result + res[0]
result2 = result2 + res[1]
return result, result2
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
bom_data = self.read(cr, uid, id, [], context=context)
default.update(name=_("%s (copy)") % (bom_data['name']), bom_id=False)
return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)
def rounding(f, r):
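    # Round f up to the nearest multiple of r, e.g. rounding(2.3, 0.5) == 2.5;
    # a zero or empty rounding value returns f unchanged.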
import math
if not r:
return f
return math.ceil(f / r) * r
class mrp_production(osv.osv):
"""
Production Orders / Manufacturing Orders
"""
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _src_id_default(self, cr, uid, ids, context=None):
src_location_id = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock', context=context)
return src_location_id.id
def _dest_id_default(self, cr, uid, ids, context=None):
dest_location_id = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock', context=context)
return dest_location_id.id
_columns = {
'name': fields.char('Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'origin': fields.char('Source Document', size=64, readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request."),
'priority': fields.selection([('0','Not urgent'),('1','Normal'),('2','Urgent'),('3','Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft':[('readonly',False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft':[('readonly',False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft':[('readonly',False)]}),
'date_start': fields.datetime('Start Date', select=True, readonly=True),
'date_finished': fields.datetime('End Date', select=True, readonly=True),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', domain=[('bom_id','=',False)], readonly=True, states={'draft':[('readonly',False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft':[('readonly',False)]},
            help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'picking_id': fields.many2one('stock.picking', 'Picking List', readonly=True, ondelete="restrict",
help="This is the Internal Picking List that brings the finished product to the production plan"),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True),
'move_lines': fields.many2many('stock.move', 'mrp_production_move_ids', 'production_id', 'move_id', 'Products to Consume',
domain=[('state','not in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_lines2': fields.many2many('stock.move', 'mrp_production_move_ids', 'production_id', 'move_id', 'Consumed Products',
domain=[('state','in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state','not in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state','in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True, states={'draft':[('readonly',False)]}),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('picking_except', 'Picking Exception'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange',
help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store=True),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store=True),
'user_id':fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'mrp.production') or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
    _order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update({
'name': self.pool.get('ir.sequence').get(cr, uid, 'mrp.production'),
'move_lines' : [],
'move_lines2' : [],
'move_created_ids' : [],
'move_created_ids2' : [],
'product_lines' : [],
'move_prod_id' : False,
'picking_id' : False
})
return super(mrp_production, self).copy(cr, uid, id, default, context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product.id, product.uom_id and product.uom_id.id, [])
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result = {
'product_uom': product_uom_id,
'bom_id': bom_id,
'routing_id': routing_id,
}
return {'value': result}
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
        @param bom_id: Id of the changed BoM.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
def action_picking_except(self, cr, uid, ids):
""" Changes the state to Exception.
@return: True
"""
self.write(cr, uid, ids, {'state': 'picking_except'})
return True
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product.
        @param properties: List of related property ids.
@return: No. of products.
"""
if properties is None:
properties = []
results = []
bom_obj = self.pool.get('mrp.bom')
uom_obj = self.pool.get('product.uom')
prod_line_obj = self.pool.get('mrp.production.product.line')
workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
for production in self.browse(cr, uid, ids):
p_ids = prod_line_obj.search(cr, SUPERUSER_ID, [('production_id', '=', production.id)], context=context)
prod_line_obj.unlink(cr, SUPERUSER_ID, p_ids, context=context)
w_ids = workcenter_line_obj.search(cr, SUPERUSER_ID, [('production_id', '=', production.id)], context=context)
workcenter_line_obj.unlink(cr, SUPERUSER_ID, w_ids, context=context)
bom_point = production.bom_id
bom_id = production.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, production.product_id.id, production.product_uom.id, properties)
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id)
routing_id = bom_point.routing_id.id or False
self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product."))
factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
res = bom_obj._bom_explode(cr, uid, bom_point, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id)
results = res[0]
results2 = res[1]
for line in results:
line['production_id'] = production.id
prod_line_obj.create(cr, uid, line)
for line in results2:
line['production_id'] = production.id
workcenter_line_obj.create(cr, uid, line)
return len(results)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the production order and related stock moves.
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for production in self.browse(cr, uid, ids, context=context):
if production.state == 'confirmed' and production.picking_id.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel manufacturing order!'),
_('You must first cancel related internal picking attached to this manufacturing order.'))
if production.move_created_ids:
move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])
move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_ready(self, cr, uid, ids, context=None):
""" Changes the production state to Ready and location id of stock move.
@return: True
"""
move_obj = self.pool.get('stock.move')
self.write(cr, uid, ids, {'state': 'ready'})
for (production_id,name) in self.name_get(cr, uid, ids):
production = self.browse(cr, uid, production_id)
if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
move_obj.write(cr, uid, [production.move_prod_id.id],
{'location_id': production.location_dest_id.id})
return True
def action_production_end(self, cr, uid, ids, context=None):
""" Changes production state to Finish and writes finished date.
@return: True
"""
for production in self.browse(cr, uid, ids):
self._costs_generate(cr, uid, production)
write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')})
return write_res
def test_production_done(self, cr, uid, ids):
""" Tests whether production is done or not.
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines:
res = False
if production.move_created_ids:
res = False
return res
    def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
        """ Compute the factor to compute the qty of products to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def action_produce(self, cr, uid, production_id, production_qty, production_mode, context=None):
""" To produce final product based on production mode (consume/consume&produce).
If Production mode is consume, all stock move lines of raw materials will be done/consumed.
If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
and stock move lines of final product will be also done/produced.
@param production_id: the ID of mrp.production object
@param production_qty: specify qty to produce
@param production_mode: specify production mode (consume/consume&produce).
@return: True
"""
stock_mov_obj = self.pool.get('stock.move')
production = self.browse(cr, uid, production_id, context=context)
produced_qty = 0
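        # Sum what has already been produced for the main product, ignoring scrapped
        # moves and by-products.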
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
if production_mode in ['consume','consume_produce']:
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
# Find product qty to be consumed and consume it
for scheduled in production.product_lines:
# total qty of consumed product we need after this consumption
total_consume = ((production_qty + produced_qty) * scheduled.product_qty / production.product_qty)
# qty available for consume and produce
qty_avail = scheduled.product_qty - consumed_data.get(scheduled.product_id.id, 0.0)
if qty_avail <= 0.0:
# there will be nothing to consume for this raw material
continue
raw_product = [move for move in production.move_lines if move.product_id.id==scheduled.product_id.id]
if raw_product:
# qtys we have to consume
qty = total_consume - consumed_data.get(scheduled.product_id.id, 0.0)
if float_compare(qty, qty_avail, precision_rounding=scheduled.product_id.uom_id.rounding) == 1:
# if qtys we have to consume is more than qtys available to consume
prod_name = scheduled.product_id.name_get()[0][1]
raise osv.except_osv(_('Warning!'), _('You are going to consume total %s quantities of "%s".\nBut you can only consume up to total %s quantities.') % (qty, prod_name, qty_avail))
if qty <= 0.0:
# we already have more qtys consumed than we need
continue
raw_product[0].action_consume(qty, raw_product[0].location_id.id, context=context)
if production_mode == 'consume_produce':
# To produce remaining qty of final product
#vals = {'state':'confirmed'}
#final_product_todo = [x.id for x in production.move_created_ids]
#stock_mov_obj.write(cr, uid, final_product_todo, vals)
#stock_mov_obj.action_confirm(cr, uid, final_product_todo, context)
produced_products = {}
for produced_product in production.move_created_ids2:
if produced_product.scrapped:
continue
if not produced_products.get(produced_product.product_id.id, False):
produced_products[produced_product.product_id.id] = 0
produced_products[produced_product.product_id.id] += produced_product.product_qty
for produce_product in production.move_created_ids:
produced_qty = produced_products.get(produce_product.product_id.id, 0)
subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
rest_qty = (subproduct_factor * production.product_qty) - produced_qty
if rest_qty < production_qty:
prod_name = produce_product.product_id.name_get()[0][1]
raise osv.except_osv(_('Warning!'), _('You are going to produce total %s quantities of "%s".\nBut you can only produce up to total %s quantities.') % (production_qty, prod_name, rest_qty))
if rest_qty > 0 :
stock_mov_obj.action_consume(cr, uid, [produce_product.id], (subproduct_factor * production_qty), context=context)
for raw_product in production.move_lines2:
new_parent_ids = []
parent_move_ids = [x.id for x in raw_product.move_history_ids]
for final_product in production.move_created_ids2:
if final_product.id not in parent_move_ids:
new_parent_ids.append(final_product.id)
for new_parent_id in new_parent_ids:
stock_mov_obj.write(cr, uid, [raw_product.id], {'move_history_ids': [(4,new_parent_id)]})
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_produce_done', cr)
return True
def _costs_generate(self, cr, uid, production):
""" Calculates total costs at the end of the production.
@param production: Id of production order.
@return: Calculated amount.
"""
amount = 0.0
analytic_line_obj = self.pool.get('account.analytic.line')
for wc_line in production.workcenter_lines:
wc = wc_line.workcenter_id
if wc.costs_journal_id and wc.costs_general_account_id:
# Cost per hour
value = wc_line.hour * wc.costs_hour
account = wc.costs_hour_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, uid, {
'name': wc_line.name + ' (H)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.hour,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
} )
# Cost per cycle
value = wc_line.cycle * wc.costs_cycle
account = wc.costs_cycle_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, uid, {
'name': wc_line.name+' (C)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.cycle,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
} )
return amount
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')})
    def test_if_product(self, cr, uid, ids):
        """ Checks that scheduled product lines exist, computing them from the BoM if missing.
        @return: True or False
        """
res = True
for production in self.browse(cr, uid, ids):
if not production.product_lines:
if not self.action_compute(cr, uid, [production.id]):
res = False
return res
def _get_auto_picking(self, cr, uid, production):
return True
def _make_production_line_procurement(self, cr, uid, production_line, shipment_move_id, context=None):
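        """ Create and confirm a procurement for one scheduled raw-material line,
        linked to the internal shipment move that feeds the production order.
        @return: Id of the created procurement.order.
        """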
wf_service = netsvc.LocalService("workflow")
procurement_order = self.pool.get('procurement.order')
production = production_line.production_id
location_id = production.location_src_id.id
date_planned = production.date_planned
procurement_name = (production.origin or '').split(':')[0] + ':' + production.name
procurement_id = procurement_order.create(cr, uid, {
'name': procurement_name,
'origin': procurement_name,
'date_planned': date_planned,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'location_id': location_id,
'procure_method': production_line.product_id.procure_method,
'move_id': shipment_move_id,
'company_id': production.company_id.id,
})
wf_service.trg_validate(uid, procurement_order._name, procurement_id, 'button_confirm', cr)
return procurement_id
def _make_production_internal_shipment_line(self, cr, uid, production_line, shipment_id, parent_move_id, destination_location_id=False, context=None):
stock_move = self.pool.get('stock.move')
production = production_line.production_id
date_planned = production.date_planned
        # Internal shipment lines are created only for Stockable and Consumable products
if production_line.product_id.type not in ('product', 'consu'):
return False
source_location_id = production.location_src_id.id
if not destination_location_id:
destination_location_id = source_location_id
return stock_move.create(cr, uid, {
'name': production.name,
'picking_id': shipment_id,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_uos_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'date': date_planned,
'move_dest_id': parent_move_id,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'state': 'waiting',
'company_id': production.company_id.id,
})
def _make_production_internal_shipment(self, cr, uid, production, context=None):
ir_sequence = self.pool.get('ir.sequence')
stock_picking = self.pool.get('stock.picking')
routing_loc = None
pick_type = 'internal'
partner_id = False
# Take routing address as a Shipment Address.
        # If the routing location is not an internal location, make an outgoing shipment; otherwise keep it internal
if production.bom_id.routing_id and production.bom_id.routing_id.location_id:
routing_loc = production.bom_id.routing_id.location_id
if routing_loc.usage != 'internal':
pick_type = 'out'
partner_id = routing_loc.partner_id and routing_loc.partner_id.id or False
        # Take the next sequence number for the shipment based on its type
pick_name = ir_sequence.get(cr, uid, 'stock.picking.' + pick_type)
picking_id = stock_picking.create(cr, uid, {
'name': pick_name,
'origin': (production.origin or '').split(':')[0] + ':' + production.name,
'type': pick_type,
'move_type': 'one',
'state': 'auto',
'partner_id': partner_id,
'auto_picking': self._get_auto_picking(cr, uid, production),
'company_id': production.company_id.id,
})
production.write({'picking_id': picking_id}, context=context)
return picking_id
def _make_production_produce_line(self, cr, uid, production, context=None):
stock_move = self.pool.get('stock.move')
source_location_id = production.product_id.property_stock_production.id
destination_location_id = production.location_dest_id.id
data = {
'name': production.name,
'date': production.date_planned,
'product_id': production.product_id.id,
'product_qty': production.product_qty,
'product_uom': production.product_uom.id,
'product_uos_qty': production.product_uos and production.product_uos_qty or False,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': production.move_prod_id.id,
'state': 'waiting',
'company_id': production.company_id.id,
}
move_id = stock_move.create(cr, uid, data, context=context)
production.write({'move_created_ids': [(6, 0, [move_id])]}, context=context)
return move_id
def _make_production_consume_line(self, cr, uid, production_line, parent_move_id, source_location_id=False, context=None):
stock_move = self.pool.get('stock.move')
production = production_line.production_id
        # Consume moves are created only for Stockable and Consumable products
if production_line.product_id.type not in ('product', 'consu'):
return False
destination_location_id = production.product_id.property_stock_production.id
if not source_location_id:
source_location_id = production.location_src_id.id
move_id = stock_move.create(cr, uid, {
'name': production.name,
'date': production.date_planned,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_uos_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': parent_move_id,
'state': 'waiting',
'company_id': production.company_id.id,
})
production.write({'move_lines': [(4, move_id)]}, context=context)
return move_id
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order.
@return: Newly generated Shipment Id.
"""
shipment_id = False
wf_service = netsvc.LocalService("workflow")
uncompute_ids = filter(lambda x:x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
self.action_compute(cr, uid, uncompute_ids, context=context)
for production in self.browse(cr, uid, ids, context=context):
shipment_id = self._make_production_internal_shipment(cr, uid, production, context=context)
produce_move_id = self._make_production_produce_line(cr, uid, production, context=context)
# Take routing location as a Source Location.
source_location_id = production.location_src_id.id
if production.bom_id.routing_id and production.bom_id.routing_id.location_id:
source_location_id = production.bom_id.routing_id.location_id.id
for line in production.product_lines:
consume_move_id = self._make_production_consume_line(cr, uid, line, produce_move_id, source_location_id=source_location_id, context=context)
shipment_move_id = self._make_production_internal_shipment_line(cr, uid, line, shipment_id, consume_move_id,\
destination_location_id=source_location_id, context=context)
self._make_production_line_procurement(cr, uid, line, shipment_move_id, context=context)
wf_service.trg_validate(uid, 'stock.picking', shipment_id, 'button_confirm', cr)
production.write({'state':'confirmed'}, context=context)
return shipment_id
def force_production(self, cr, uid, ids, *args):
""" Assigns products.
@param *args: Arguments
@return: True
"""
pick_obj = self.pool.get('stock.picking')
pick_obj.force_assign(cr, uid, [prod.picking_id.id for prod in self.browse(cr, uid, ids)])
return True
class mrp_production_workcenter_line(osv.osv):
_name = 'mrp.production.workcenter.line'
_description = 'Work Order'
_order = 'sequence'
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Work Order', size=64, required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles', digits=(16,2)),
'hour': fields.float('Number of Hours', digits=(16,2)),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
track_visibility='onchange', select=True, ondelete='cascade', required=True),
}
_defaults = {
'sequence': lambda *a: 1,
'hour': lambda *a: 0,
'cycle': lambda *a: 0,
}
class mrp_production_product_line(osv.osv):
_name = 'mrp.production.product.line'
_description = 'Production Scheduled Product'
_columns = {
'name': fields.char('Name', size=64, required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_uos_qty': fields.float('Product UOS Quantity'),
'product_uos': fields.many2one('product.uom', 'Product UOS'),
'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
}
class product_product(osv.osv):
_inherit = "product.product"
_columns = {
'bom_ids': fields.one2many('mrp.bom', 'product_id', 'Bill of Materials'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,376,388,431,002,279,000 | 52.25071 | 296 | 0.595263 | false |
adrienbrault/home-assistant | homeassistant/components/tradfri/sensor.py | 4 | 1603 | """Support for IKEA Tradfri sensors."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from .base_class import TradfriBaseDevice
from .const import CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Tradfri config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
tradfri_data = hass.data[DOMAIN][config_entry.entry_id]
api = tradfri_data[KEY_API]
devices = tradfri_data[DEVICES]
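    # Devices without light, socket, blind or signal repeater controls are exposed
    # as battery-level sensors (e.g. remotes and motion sensors).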
    # Build a list (a generator would always be truthy in the check below).
    sensors = [
        dev
        for dev in devices
        if not dev.has_light_control
        and not dev.has_socket_control
        and not dev.has_blind_control
        and not dev.has_signal_repeater_control
    ]
if sensors:
async_add_entities(TradfriSensor(sensor, api, gateway_id) for sensor in sensors)
class TradfriSensor(TradfriBaseDevice, SensorEntity):
"""The platform class required by Home Assistant."""
def __init__(self, device, api, gateway_id):
"""Initialize the device."""
super().__init__(device, api, gateway_id)
self._unique_id = f"{gateway_id}-{device.id}"
@property
    def device_class(self):
        """Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def state(self):
"""Return the current state of the device."""
return self._device.device_info.battery_level
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return PERCENTAGE
| mit | 54,531,390,477,784,540 | 31.06 | 88 | 0.668746 | false |
sebastienbarbier/723e | seven23/api/accounts/views.py | 2 | 1067 | """
api/va/accounts views
"""
import json
from django.contrib.auth import authenticate
# Default user model may get swapped out of the system.
from django.contrib.auth.models import User
from rest_framework import viewsets, generics, permissions
from rest_framework_bulk import BulkModelViewSet
from seven23.api.permissions import IsPaid
from seven23.models.accounts.models import Account
from seven23.models.accounts.serializers import AccountSerializer
class AccountsList(BulkModelViewSet):
"""
Distribute Account model object
"""
serializer_class = AccountSerializer
permission_classes = (permissions.IsAuthenticated, IsPaid)
def get_queryset(self):
if self.request.user.is_anonymous:
return Account.objects.none()
return self.request.user.accounts.all()
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def allow_bulk_destroy(self, qs, filtered):
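        # Only allow bulk deletion when the client sent an explicit list payload;
        # a plain filtered DELETE request is rejected.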
if isinstance(self.request.data, list):
return True
return False | mit | 5,399,311,373,164,495,000 | 28.666667 | 65 | 0.731959 | false |
chfw/pyexcel | pyexcel/docstrings/core.py | 2 | 2860 | """
pyexcel.docstrings.core
~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Reusable docstrings for pyexcel.core
:copyright: (c) 2015-2020 by Onni Software Ltd.
:license: New BSD License
"""
from . import keywords
__GET_SHEET__ = (
keywords.EXAMPLE_NOTE_PAGINATION
+ keywords.SOURCE_PARAMS_TABLE
+ """
**Parameters**
"""
+ keywords.SOURCE_PARAMS
)
__GET_BOOK__ = (
keywords.SOURCE_BOOK_PARAMS_TABLE
+ """
**Parameters**
"""
+ keywords.SOURCE_BOOK_PARAMS
)
I_NOTE = (
"""
When you use this function to work on physical files, this function
will leave its file handle open. When you finish the operation
on its data, you need to call :func:`pyexcel.free_resources` to
close file handle(s).
"""
+ keywords.I_NOTE
)
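# Illustrative usage of the note above (assumes an "example.csv" exists on disk):
#     records = pyexcel.iget_records(file_name="example.csv")
#     for record in records:
#         ...
#     pyexcel.free_resources()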
__SAVE_AS__ = (
"""
It accepts two sets of keywords. Why two sets? One set is
source, the other set is destination. In order to distinguish
the two sets, source set will be exactly the same
as the ones for :meth:`pyexcel.get_sheet`; destination
set are exactly the same as the ones for :class:`pyexcel.Sheet.save_as`
but require a 'dest' prefix.
"""
+ keywords.DEST_PARAMS_TABLE
+ __GET_SHEET__
+ keywords.DEST_PARAMS
+ """
if a csv file is the destination format, python csv
`fmtparams <https://docs.python.org/release/3.1.5/
library/csv.html#dialects-and-formatting-parameters>`_
are accepted.
For example: dest_lineterminator will replace the default '\r\n'
with the one you specify.
In addition, this function uses :class:`pyexcel.Sheet` to
render the data, which could have a performance penalty. In exchange,
parameters for :class:`pyexcel.Sheet` can be passed on, e.g.
`name_columns_by_row`.
"""
)
__SAVE_BOOK_AS__ = (
__GET_BOOK__
+ keywords.DEST_BOOK_PARAMS
+ """
Where the dictionary should have text as keys and two dimensional
array as values.
================ ============================================
Saving to source parameters
================ ============================================
file dest_file_name, dest_sheet_name,
keywords with prefix 'dest'
memory dest_file_type, dest_content,
dest_sheet_name, keywords with prefix 'dest'
sql dest_session, dest_tables,
dest_table_init_func, dest_mapdict
django model dest_models, dest_initializers,
dest_mapdict, dest_batch_size
================ ============================================
"""
)
GET_SHEET = __GET_SHEET__
GET_ARRAY = __GET_SHEET__
IGET_ARRAY = __GET_SHEET__ + I_NOTE
GET_DICT = __GET_SHEET__
GET_RECORDS = __GET_SHEET__
IGET_RECORDS = __GET_SHEET__ + I_NOTE
SAVE_AS = __SAVE_AS__
ISAVE_AS = __SAVE_AS__ + I_NOTE
GET_BOOK = __GET_BOOK__
IGET_BOOK = __GET_BOOK__ + I_NOTE
GET_BOOK_DICT = __GET_BOOK__
SAVE_BOOK_AS = __SAVE_BOOK_AS__
ISAVE_BOOK_AS = __SAVE_BOOK_AS__ + I_NOTE
| bsd-3-clause | -1,879,801,414,825,187,000 | 23.444444 | 71 | 0.607343 | false |
olasitarska/django | tests/settings_tests/tests.py | 10 | 16616 | import os
import sys
from types import ModuleType
import unittest
import warnings
from django.conf import LazySettings, Settings, settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import (SimpleTestCase, TransactionTestCase, TestCase,
modify_settings, override_settings, signals)
from django.utils import six
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3],
TEST='override', TEST_OUTER='outer')
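# Net effect of the two class decorators: ITEMS starts as ['a', 'c', 'e'] and becomes
# ['b', 'c', 'd'] (prepend 'b', append 'd', remove 'a'/'e'), as test_override asserts.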
class FullyDecoratedTranTestCase(TransactionTestCase):
available_apps = []
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
self.assertEqual(settings.TEST, 'override')
self.assertEqual(settings.TEST_OUTER, 'outer')
@modify_settings(ITEMS={
'append': ['e', 'f'],
'prepend': ['a'],
'remove': ['d', 'c'],
})
def test_method_list_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f'])
self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3])
@modify_settings(ITEMS={
'append': ['b'],
'prepend': ['d'],
'remove': ['a', 'c', 'e'],
})
def test_method_list_override_no_ops(self):
self.assertListEqual(settings.ITEMS, ['b', 'd'])
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
def test_method_list_override_strings(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
@modify_settings(ITEMS={'remove': ['b', 'd']})
@modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']})
def test_method_list_override_nested_order(self):
self.assertListEqual(settings.ITEMS, ['d', 'c', 'b'])
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
self.assertEqual(settings.TEST_OUTER, 'outer')
def test_decorated_testcase_name(self):
self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase')
def test_decorated_testcase_module(self):
self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
@modify_settings(ITEMS={
'prepend': ['b'],
'append': ['d'],
'remove': ['a', 'e']
})
@override_settings(ITEMS=['a', 'c', 'e'], TEST='override')
class FullyDecoratedTestCase(TestCase):
def test_override(self):
self.assertListEqual(settings.ITEMS, ['b', 'c', 'd'])
self.assertEqual(settings.TEST, 'override')
@modify_settings(ITEMS={
'append': 'e',
'prepend': 'a',
'remove': 'c',
})
@override_settings(TEST='override2')
def test_method_override(self):
self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e'])
self.assertEqual(settings.TEST, 'override2')
class ClassDecoratedTestCaseSuper(TestCase):
"""
Dummy class for testing max recursion error in child class call to
super(). Refs #17011.
"""
def test_max_recursion_error(self):
pass
@override_settings(TEST='override')
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
def test_override(self):
self.assertEqual(settings.TEST, 'override')
@override_settings(TEST='override2')
def test_method_override(self):
self.assertEqual(settings.TEST, 'override2')
def test_max_recursion_error(self):
"""
Overriding a method on a super class and then calling that method on
the super class should not trigger infinite recursion. See #17011.
"""
try:
super(ClassDecoratedTestCase, self).test_max_recursion_error()
except RuntimeError:
self.fail()
@modify_settings(ITEMS={'append': 'mother'})
@override_settings(ITEMS=['father'], TEST='override-parent')
class ParentDecoratedTestCase(TestCase):
pass
@modify_settings(ITEMS={'append': ['child']})
@override_settings(TEST='override-child')
class ChildDecoratedTestCase(ParentDecoratedTestCase):
def test_override_settings_inheritance(self):
self.assertEqual(settings.ITEMS, ['father', 'mother', 'child'])
self.assertEqual(settings.TEST, 'override-child')
class SettingsTests(TestCase):
def setUp(self):
self.testvalue = None
signals.setting_changed.connect(self.signal_callback)
def tearDown(self):
signals.setting_changed.disconnect(self.signal_callback)
def signal_callback(self, sender, setting, value, **kwargs):
if setting == 'TEST':
self.testvalue = value
def test_override(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_change(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test2'
self.assertEqual('test', settings.TEST)
del settings.TEST
def test_override_doesnt_leak(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual('override', settings.TEST)
settings.TEST = 'test'
self.assertRaises(AttributeError, getattr, settings, 'TEST')
@override_settings(TEST='override')
def test_decorator(self):
self.assertEqual('override', settings.TEST)
def test_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override = override_settings(TEST='override')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
override.enable()
self.assertEqual('override', settings.TEST)
override.disable()
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_class_decorator(self):
# SimpleTestCase can be decorated by override_settings, but not ut.TestCase
class SimpleTestCaseSubclass(SimpleTestCase):
pass
class UnittestTestCaseSubclass(unittest.TestCase):
pass
decorated = override_settings(TEST='override')(SimpleTestCaseSubclass)
self.assertIsInstance(decorated, type)
self.assertTrue(issubclass(decorated, SimpleTestCase))
with six.assertRaisesRegex(self, Exception,
"Only subclasses of Django SimpleTestCase*"):
decorated = override_settings(TEST='override')(UnittestTestCaseSubclass)
def test_signal_callback_context_manager(self):
self.assertRaises(AttributeError, getattr, settings, 'TEST')
with self.settings(TEST='override'):
self.assertEqual(self.testvalue, 'override')
self.assertEqual(self.testvalue, None)
@override_settings(TEST='override')
def test_signal_callback_decorator(self):
self.assertEqual(self.testvalue, 'override')
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
def test_override_settings_delete(self):
"""
Allow deletion of a setting in an overridden settings set (#18824)
"""
previous_i18n = settings.USE_I18N
previous_l10n = settings.USE_L10N
with self.settings(USE_I18N=False):
del settings.USE_I18N
self.assertRaises(AttributeError, getattr, settings, 'USE_I18N')
# Should also work for a non-overridden setting
del settings.USE_L10N
self.assertRaises(AttributeError, getattr, settings, 'USE_L10N')
self.assertEqual(settings.USE_I18N, previous_i18n)
self.assertEqual(settings.USE_L10N, previous_l10n)
def test_override_settings_nested(self):
"""
Test that override_settings uses the actual _wrapped attribute at
runtime, not when it was instantiated.
"""
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
inner = override_settings(TEST2='override')
with override_settings(TEST='override'):
self.assertEqual('override', settings.TEST)
with inner:
self.assertEqual('override', settings.TEST)
self.assertEqual('override', settings.TEST2)
# inner's __exit__ should have restored the settings of the outer
# context manager, not those when the class was instantiated
self.assertEqual('override', settings.TEST)
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
self.assertRaises(AttributeError, getattr, settings, 'TEST')
self.assertRaises(AttributeError, getattr, settings, 'TEST2')
def test_allowed_include_roots_string(self):
"""
        ALLOWED_INCLUDE_ROOTS must be a tuple; assigning a plain string to it
        raises a ValueError.
"""
self.assertRaises(ValueError, setattr, settings,
'ALLOWED_INCLUDE_ROOTS', '/var/www/ssi/')
class TestComplexSettingOverride(TestCase):
def setUp(self):
self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy()
signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN')
def tearDown(self):
signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings
self.assertFalse('TEST_WARN' in signals.COMPLEX_OVERRIDE_SETTINGS)
def test_complex_override_warning(self):
"""Regression test for #19031"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with override_settings(TEST_WARN='override'):
self.assertEqual(settings.TEST_WARN, 'override')
self.assertEqual(len(w), 1)
            # File extension may be .py, .pyc, etc. Compare only basename.
self.assertEqual(os.path.splitext(w[0].filename)[0],
os.path.splitext(__file__)[0])
self.assertEqual(str(w[0].message),
'Overriding setting TEST_WARN can lead to unexpected behavior.')
class TrailingSlashURLTests(TestCase):
"""
Tests for the MEDIA_URL and STATIC_URL settings.
They must end with a slash to ensure there's a deterministic way to build
paths in templates.
"""
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
self._original_static_url = self.settings_module.STATIC_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
self.settings_module.STATIC_URL = self._original_static_url
def test_blank(self):
"""
The empty string is accepted, even though it doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = ''
self.assertEqual('', self.settings_module.STATIC_URL)
def test_end_slash(self):
"""
It works if the value ends in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/'
self.assertEqual('http://static.foo.com/',
self.settings_module.STATIC_URL)
def test_no_end_slash(self):
"""
An ImproperlyConfigured exception is raised if the value doesn't end
in a slash.
"""
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.MEDIA_URL = 'http://media.foo.com'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = '/foo'
with self.assertRaises(ImproperlyConfigured):
self.settings_module.STATIC_URL = 'http://static.foo.com'
def test_double_slash(self):
"""
If the value ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/wrong//'
self.assertEqual('/wrong//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/wrong//'
self.assertEqual('http://media.foo.com/wrong//',
self.settings_module.MEDIA_URL)
self.settings_module.STATIC_URL = '/wrong//'
self.assertEqual('/wrong//', self.settings_module.STATIC_URL)
self.settings_module.STATIC_URL = 'http://static.foo.com/wrong//'
self.assertEqual('http://static.foo.com/wrong//',
self.settings_module.STATIC_URL)
class SecureProxySslHeaderTest(TestCase):
settings_module = settings
def setUp(self):
self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
def tearDown(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
def test_none(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = None
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_without_xheader(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_wrong(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue'
self.assertEqual(req.is_secure(), False)
def test_set_with_xheader_right(self):
self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
req = HttpRequest()
req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https'
self.assertEqual(req.is_secure(), True)
class IsOverriddenTest(TestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY='foo')
self.assertTrue(s.is_overridden('SECRET_KEY'))
def test_module(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
sys.modules['fake_settings_module'] = settings_module
try:
s = Settings('fake_settings_module')
self.assertTrue(s.is_overridden('SECRET_KEY'))
self.assertFalse(s.is_overridden('TEMPLATE_LOADERS'))
finally:
del sys.modules['fake_settings_module']
def test_override(self):
self.assertFalse(settings.is_overridden('TEMPLATE_LOADERS'))
with override_settings(TEMPLATE_LOADERS=[]):
self.assertTrue(settings.is_overridden('TEMPLATE_LOADERS'))
class TestTupleSettings(unittest.TestCase):
"""
Make sure settings that should be tuples throw ImproperlyConfigured if they
are set to a string instead of a tuple.
"""
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS", "LOCALE_PATHS")
def test_tuple_settings(self):
settings_module = ModuleType('fake_settings_module')
settings_module.SECRET_KEY = 'foo'
for setting in self.tuple_settings:
            setattr(settings_module, setting, ('non_tuple_value'))  # a str; the parentheses do not make a tuple
sys.modules['fake_settings_module'] = settings_module
try:
with self.assertRaises(ImproperlyConfigured):
Settings('fake_settings_module')
finally:
del sys.modules['fake_settings_module']
delattr(settings_module, setting)
| bsd-3-clause | 9,064,186,888,235,646,000 | 35.200436 | 93 | 0.638481 | false |
akash1808/python-barbicanclient | barbicanclient/secrets.py | 1 | 19860 | # Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import functools
import logging
from oslo_utils.timeutils import parse_isotime
import six
from barbicanclient import base
from barbicanclient import exceptions
from barbicanclient import formatter
LOG = logging.getLogger(__name__)
def lazy(func):
@functools.wraps(func)
def wrapper(self, *args):
self._fill_lazy_properties()
return func(self, *args)
return wrapper
def immutable_after_save(func):
@functools.wraps(func)
def wrapper(self, *args):
if self._secret_ref:
raise base.ImmutableException()
return func(self, *args)
return wrapper
class SecretFormatter(formatter.EntityFormatter):
columns = ("Secret href",
"Name",
"Created",
"Status",
"Content types",
"Algorithm",
"Bit length",
"Secret type",
"Mode",
"Expiration",
)
def _get_formatted_data(self):
data = (self.secret_ref,
self.name,
self.created,
self.status,
self.content_types,
self.algorithm,
self.bit_length,
self.secret_type,
self.mode,
self.expiration,
)
return data
class Secret(SecretFormatter):
"""
Secrets represent keys, credentials, and other sensitive data that is
stored by the Barbican service.
"""
_entity = 'secrets'
def __init__(self, api, name=None, expiration=None, algorithm=None,
bit_length=None, mode=None, payload=None,
payload_content_type=None, payload_content_encoding=None,
secret_ref=None, created=None, updated=None,
content_types=None, status=None, secret_type=None,
creator_id=None):
"""
Secret objects should not be instantiated directly. You should use
the `create` or `get` methods of the
:class:`barbicanclient.secrets.SecretManager` instead.
"""
self._api = api
self._secret_ref = secret_ref
self._fill_from_data(
name=name,
expiration=expiration,
algorithm=algorithm,
bit_length=bit_length,
secret_type=secret_type,
mode=mode,
payload=payload,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding,
created=created,
updated=updated,
content_types=content_types,
status=status,
creator_id=creator_id
)
@property
def secret_ref(self):
return self._secret_ref
@property
@lazy
def name(self):
return self._name
@property
@lazy
def expiration(self):
return self._expiration
@property
@lazy
def algorithm(self):
return self._algorithm
@property
@lazy
def bit_length(self):
return self._bit_length
@property
@lazy
def secret_type(self):
return self._secret_type
@property
@lazy
def mode(self):
return self._mode
@property
@lazy
def payload_content_encoding(self):
return self._payload_content_encoding
@property
@lazy
def created(self):
return self._created
@property
@lazy
def updated(self):
return self._updated
@property
@lazy
def content_types(self):
if self._content_types:
return self._content_types
elif self._payload_content_type:
return {u'default': six.u(self.payload_content_type)}
return None
@property
@lazy
def status(self):
return self._status
@property
def payload_content_type(self):
if not self._payload_content_type and self.content_types:
self._payload_content_type = self.content_types.get('default')
return self._payload_content_type
@property
def payload(self):
"""
Lazy-loaded property that holds the unencrypted data
"""
if self._payload is None and self.secret_ref is not None:
try:
self._fetch_payload()
except ValueError:
LOG.warning("Secret does not contain a payload")
return None
return self._payload
@name.setter
@immutable_after_save
def name(self, value):
self._name = value
@expiration.setter
@immutable_after_save
def expiration(self, value):
self._expiration = value
@algorithm.setter
@immutable_after_save
def algorithm(self, value):
self._algorithm = value
@bit_length.setter
@immutable_after_save
def bit_length(self, value):
self._bit_length = value
@secret_type.setter
@immutable_after_save
def secret_type(self, value):
self._secret_type = value
@mode.setter
@immutable_after_save
def mode(self, value):
self._mode = value
@payload.setter
def payload(self, value):
self._payload = value
@payload_content_type.setter
@immutable_after_save
def payload_content_type(self, value):
LOG.warning(
'DEPRECATION WARNING: Manually setting the payload_content_type '
'can lead to unexpected results. It will be removed in a future '
'release. See Launchpad Bug #1419166.'
)
self._payload_content_type = value
@payload_content_encoding.setter
@immutable_after_save
def payload_content_encoding(self, value):
LOG.warning(
'DEPRECATION WARNING: Manually setting the '
'payload_content_encoding can lead to unexpected results. It '
'will be removed in a future release. See Launchpad Bug #1419166.'
)
self._payload_content_encoding = value
def _fetch_payload(self):
if not self.payload_content_type and not self.content_types:
raise ValueError('Secret has no encrypted data to decrypt.')
elif not self.payload_content_type:
raise ValueError("Must specify decrypt content-type as "
"secret does not specify a 'default' "
"content-type.")
headers = {'Accept': self.payload_content_type}
if self._secret_ref[-1] != "/":
payload_url = self._secret_ref + '/payload'
else:
payload_url = self._secret_ref + 'payload'
payload = self._api._get_raw(payload_url, headers=headers)
if self.payload_content_type == u'text/plain':
self._payload = payload.decode('UTF-8')
else:
self._payload = payload
@immutable_after_save
def store(self):
"""
Stores the Secret in Barbican. New Secret objects are not persisted
in Barbican until this method is called.
:raises: PayloadException
"""
secret_dict = {
'name': self.name,
'algorithm': self.algorithm,
'mode': self.mode,
'bit_length': self.bit_length,
'secret_type': self.secret_type,
'expiration': self.expiration
}
if self.payload == '':
raise exceptions.PayloadException("Invalid Payload: "
"Cannot Be Empty String")
if self.payload is not None and not isinstance(self.payload,
(six.text_type,
six.binary_type)):
raise exceptions.PayloadException("Invalid Payload Type")
if self.payload_content_type or self.payload_content_encoding:
"""
Setting the payload_content_type and payload_content_encoding
manually is deprecated. This clause of the if statement is here
for backwards compatibility and should be removed in a future
release.
"""
secret_dict['payload'] = self.payload
secret_dict['payload_content_type'] = self.payload_content_type
secret_dict['payload_content_encoding'] = (
self.payload_content_encoding
)
elif type(self.payload) is six.binary_type:
"""
six.binary_type is stored as application/octet-stream
and it is base64 encoded for a one-step POST
"""
secret_dict['payload'] = (
base64.b64encode(self.payload)
).decode('UTF-8')
secret_dict['payload_content_type'] = u'application/octet-stream'
secret_dict['payload_content_encoding'] = u'base64'
elif type(self.payload) is six.text_type:
"""
six.text_type is stored as text/plain
"""
secret_dict['payload'] = self.payload
secret_dict['payload_content_type'] = u'text/plain'
secret_dict = base.filter_null_keys(secret_dict)
LOG.debug("Request body: {0}".format(secret_dict))
# Save, store secret_ref and return
response = self._api.post(self._entity, json=secret_dict)
if response:
self._secret_ref = response.get('secret_ref')
return self.secret_ref
def update(self):
"""
Updates the secret in Barbican.
"""
if not self.payload:
raise exceptions.PayloadException("Invalid or Missing Payload")
if not self.secret_ref:
raise LookupError("Secret is not yet stored.")
if type(self.payload) is six.binary_type:
headers = {'content-type': "application/octet-stream"}
elif type(self.payload) is six.text_type:
headers = {'content-type': "text/plain"}
else:
raise exceptions.PayloadException("Invalid Payload Type")
self._api.put(self._secret_ref,
headers=headers,
data=self.payload)
def delete(self):
"""
Deletes the Secret from Barbican
"""
if self._secret_ref:
self._api.delete(self._secret_ref)
self._secret_ref = None
else:
raise LookupError("Secret is not yet stored.")
def _fill_from_data(self, name=None, expiration=None, algorithm=None,
bit_length=None, secret_type=None, mode=None,
payload=None, payload_content_type=None,
payload_content_encoding=None, created=None,
updated=None, content_types=None, status=None,
creator_id=None):
self._name = name
self._algorithm = algorithm
self._bit_length = bit_length
self._mode = mode
self._secret_type = secret_type
self._payload = payload
self._payload_content_encoding = payload_content_encoding
self._expiration = expiration
self._creator_id = creator_id
if not self._secret_type:
self._secret_type = "opaque"
if self._expiration:
self._expiration = parse_isotime(self._expiration)
if self._secret_ref:
self._content_types = content_types
self._status = status
self._created = created
self._updated = updated
if self._created:
self._created = parse_isotime(self._created)
if self._updated:
self._updated = parse_isotime(self._updated)
else:
self._content_types = None
self._status = None
self._created = None
self._updated = None
if not self._content_types:
self._payload_content_type = payload_content_type
else:
self._payload_content_type = self._content_types.get('default',
None)
def _fill_lazy_properties(self):
if self._secret_ref and not self._name:
result = self._api.get(self._secret_ref)
self._fill_from_data(
name=result.get('name'),
expiration=result.get('expiration'),
algorithm=result.get('algorithm'),
bit_length=result.get('bit_length'),
secret_type=result.get('secret_type'),
mode=result.get('mode'),
payload_content_type=result.get('payload_content_type'),
payload_content_encoding=result.get(
'payload_content_encoding'
),
created=result.get('created'),
updated=result.get('updated'),
content_types=result.get('content_types'),
status=result.get('status')
)
def __repr__(self):
if self._secret_ref:
return 'Secret(secret_ref="{0}")'.format(self._secret_ref)
return 'Secret(name="{0}")'.format(self._name)
class SecretManager(base.BaseEntityManager):
"""Entity Manager for Secret entities"""
def __init__(self, api):
super(SecretManager, self).__init__(api, 'secrets')
def get(self, secret_ref, payload_content_type=None):
"""
Retrieve an existing Secret from Barbican
:param str secret_ref: Full HATEOAS reference to a Secret
:param str payload_content_type: DEPRECATED: Content type to use for
payload decryption. Setting this can lead to unexpected results.
See Launchpad Bug #1419166.
:returns: Secret object retrieved from Barbican
:rtype: :class:`barbicanclient.secrets.Secret`
:raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
:raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
:raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
"""
LOG.debug("Getting secret - Secret href: {0}".format(secret_ref))
base.validate_ref(secret_ref, 'Secret')
return Secret(
api=self._api,
payload_content_type=payload_content_type,
secret_ref=secret_ref
)
def update(self, secret_ref, payload=None):
"""
Update an existing Secret from Barbican
:param str secret_ref: Full HATEOAS reference to a Secret
:param str payload: New payload to add to secret
:raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
:raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
:raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
"""
base.validate_ref(secret_ref, 'Secret')
if not secret_ref:
raise ValueError('secret_ref is required.')
if type(payload) is six.binary_type:
headers = {'content-type': "application/octet-stream"}
elif type(payload) is six.text_type:
headers = {'content-type': "text/plain"}
else:
raise exceptions.PayloadException("Invalid Payload Type")
self._api.put(secret_ref,
headers=headers,
data=payload)
def create(self, name=None, payload=None,
payload_content_type=None, payload_content_encoding=None,
algorithm=None, bit_length=None, secret_type=None,
mode=None, expiration=None):
"""
Factory method for creating new `Secret` objects
Secrets returned by this method have not yet been stored in the
Barbican service.
:param name: A friendly name for the Secret
:param payload: The unencrypted secret data
:param payload_content_type: DEPRECATED: The format/type of the secret
data. Setting this can lead to unexpected results. See Launchpad
Bug #1419166.
:param payload_content_encoding: DEPRECATED: The encoding of the secret
data. Setting this can lead to unexpected results. See Launchpad
Bug #1419166.
:param algorithm: The algorithm associated with this secret key
:param bit_length: The bit length of this secret key
:param mode: The algorithm mode used with this secret key
:param secret_type: The secret type for this secret key
:param expiration: The expiration time of the secret in ISO 8601 format
:returns: A new Secret object
:rtype: :class:`barbicanclient.secrets.Secret`
:raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
:raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
:raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
"""
return Secret(api=self._api, name=name, payload=payload,
payload_content_type=payload_content_type,
payload_content_encoding=payload_content_encoding,
algorithm=algorithm, bit_length=bit_length, mode=mode,
secret_type=secret_type, expiration=expiration)
def delete(self, secret_ref):
"""
Delete a Secret from Barbican
:param secret_ref: The href for the secret to be deleted
:raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
:raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
:raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
"""
base.validate_ref(secret_ref, 'Secret')
if not secret_ref:
raise ValueError('secret_ref is required.')
self._api.delete(secret_ref)
def list(self, limit=10, offset=0, name=None, algorithm=None,
mode=None, bits=0):
"""
List Secrets for the project
This method uses the limit and offset parameters for paging,
and also supports filtering.
:param limit: Max number of secrets returned
:param offset: Offset secrets to begin list
:param name: Name filter for the list
:param algorithm: Algorithm filter for the list
:param mode: Mode filter for the list
:param bits: Bits filter for the list
:returns: list of Secret objects that satisfy the provided filter
criteria.
:rtype: list
:raises barbicanclient.exceptions.HTTPAuthError: 401 Responses
:raises barbicanclient.exceptions.HTTPClientError: 4xx Responses
:raises barbicanclient.exceptions.HTTPServerError: 5xx Responses
"""
LOG.debug('Listing secrets - offset {0} limit {1}'.format(offset,
limit))
params = {'limit': limit, 'offset': offset}
if name:
params['name'] = name
if algorithm:
params['alg'] = algorithm
if mode:
params['mode'] = mode
if bits > 0:
params['bits'] = bits
response = self._api.get(self._entity, params=params)
return [
Secret(api=self._api, **s)
for s in response.get('secrets', [])
]
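# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an already-constructed Barbican `client` whose `secrets` attribute
# is a SecretManager instance; the method names below follow this module's API.
#
#   secret = client.secrets.create(name='example key',
#                                  payload=u'sup3r s3cr3t stuff',
#                                  algorithm='aes', bit_length=256, mode='cbc')
#   ref = secret.store()             # persists the secret, returns its href
#   copy = client.secrets.get(ref)   # lazy: payload fetched on first access
#   client.secrets.delete(ref)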
| apache-2.0 | 3,335,704,450,485,960,700 | 33.781086 | 79 | 0.584995 | false |
tyc85/nwsdr-3.6.3-dsc | dsc-bfsk-rs/cat_packet_utils_uncoded.py | 2 | 27914 | #
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import struct
import numpy
from gnuradio import gru
#import crc
import gnuradio.digital.crc as crc
def conv_packed_binary_string_to_1_0_string(s):
"""
'\xAF' --> '10101111'
"""
r = []
for ch in s:
x = ord(ch)
for i in range(7,-1,-1):
t = (x >> i) & 0x1
r.append(t)
return ''.join(map(lambda x: chr(x + ord('0')), r))
def conv_1_0_string_to_packed_binary_string(s):
"""
'10101111' -> ('\xAF', False)
Basically the inverse of conv_packed_binary_string_to_1_0_string,
but also returns a flag indicating if we had to pad with leading zeros
to get to a multiple of 8.
"""
if not is_1_0_string(s):
raise ValueError, "Input must be a string containing only 0's and 1's"
# pad to multiple of 8
padded = False
rem = len(s) % 8
if rem != 0:
npad = 8 - rem
s = '0' * npad + s
padded = True
assert len(s) % 8 == 0
r = []
i = 0
while i < len(s):
t = 0
for j in range(8):
t = (t << 1) | (ord(s[i + j]) - ord('0'))
r.append(chr(t))
i += 8
return (''.join(r), padded)
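# Illustrative sketch (not part of the original module): the two conversion
# helpers above are inverses of one another, modulo leading-zero padding.
#
#   >>> conv_packed_binary_string_to_1_0_string('\xAF')
#   '10101111'
#   >>> conv_1_0_string_to_packed_binary_string('10101111')
#   ('\xaf', False)
#   >>> conv_1_0_string_to_packed_binary_string('1111')    # padded to 8 bits
#   ('\x0f', True)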
default_access_code = \
conv_packed_binary_string_to_1_0_string('\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC')
preamble = \
conv_packed_binary_string_to_1_0_string('\xA4\xF2')
def is_1_0_string(s):
if not isinstance(s, str):
return False
for ch in s:
if not ch in ('0', '1'):
return False
return True
def string_to_hex_list(s):
return map(lambda x: hex(ord(x)), s)
def whiten(s, o):
sa = numpy.fromstring(s, numpy.uint8)
z = sa ^ random_mask_vec8[o:len(sa)+o]
return z.tostring()
def dewhiten(s, o):
return whiten(s, o) # self inverse
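# Illustrative sketch (not part of the original module): whitening XORs the
# data with a fixed PN sequence (random_mask_vec8, defined below), so applying
# it twice with the same offset returns the original bytes.
#
#   >>> data = 'hello'
#   >>> dewhiten(whiten(data, 0), 0) == data
#   True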
def make_header(payload_len, whitener_offset=0):
# Upper nibble is offset, lower 12 bits is len
val = ((whitener_offset & 0xf) << 12) | (payload_len & 0x0fff)
#print "offset =", whitener_offset, " len =", payload_len, " val=", val
return struct.pack('!HH', val, val)
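# Illustrative sketch (not part of the original module): make_header(11, 0)
# packs the 16-bit value 0x000b twice, big-endian, giving '\x00\x0b\x00\x0b';
# the 4-byte header simply repeats the same offset/length word.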
def make_packet(payload, samples_per_symbol, bits_per_symbol,
access_code=default_access_code, pad_for_usrp=True,
whitener_offset=0, whitening=True):
"""
Build a packet, given access code, payload, and whitener offset
@param payload: packet payload, len [0, 4096]
@param samples_per_symbol: samples per symbol (needed for padding calculation)
@type samples_per_symbol: int
@param bits_per_symbol: (needed for padding calculation)
@type bits_per_symbol: int
@param access_code: string of ascii 0's and 1's
    @param whitener_offset: offset into whitener string to use [0-16)
    Packet will have the preamble and access code at the beginning, followed by
    the length header, the payload and finally a CRC-32.
"""
if not is_1_0_string(access_code):
raise ValueError, "access_code must be a string containing only 0's and 1's (%r)" % (access_code,)
    if not (0 <= whitener_offset < 16):
raise ValueError, "whitener_offset must be between 0 and 15, inclusive (%i)" % (whitener_offset,)
(packed_access_code, padded) = conv_1_0_string_to_packed_binary_string(access_code)
(packed_preamble, ignore) = conv_1_0_string_to_packed_binary_string(preamble)
payload_with_crc = crc.gen_and_append_crc32(payload)
#print "outbound crc =", string_to_hex_list(payload_with_crc[-4:])
L = len(payload_with_crc)
MAXLEN = len(random_mask_tuple)
if L > MAXLEN:
raise ValueError, "len(payload) must be in [0, %d]" % (MAXLEN,)
if whitening:
pkt = ''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
whiten(payload_with_crc, whitener_offset), '\x55'))
else:
pkt = ''.join((packed_preamble, packed_access_code, make_header(L, whitener_offset),
(payload_with_crc), '\x55'))
if pad_for_usrp:
pkt = pkt + (_npadding_bytes(len(pkt), int(samples_per_symbol), bits_per_symbol) * '\x55')
#print "make_packet: len(pkt) =", len(pkt)
return pkt
def _npadding_bytes(pkt_byte_len, samples_per_symbol, bits_per_symbol):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
    @param pkt_byte_len: length in bytes of the packet, not including padding.
    @param samples_per_symbol: samples per symbol (equals samples per bit for GMSK, which carries 1 bit per symbol)
@type samples_per_symbol: int
@param bits_per_symbol: bits per symbol (log2(modulation order))
@type bits_per_symbol: int
@returns number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus/8, samples_per_symbol) * bits_per_symbol / samples_per_symbol
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
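# Illustrative sketch (not part of the original module): with the assumed
# values samples_per_symbol=2 and bits_per_symbol=1, byte_modulus is
# lcm(16, 2) * 1 / 2 = 8, so a 33-byte packet needs 7 bytes of padding to
# reach the next multiple of 8.
#
#   >>> _npadding_bytes(33, 2, 1)
#   7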
def unmake_packet(whitened_payload_with_crc, whitener_offset=0, dewhitening=True):
"""
Return (ok, payload)
@param whitened_payload_with_crc: string
"""
if dewhitening:
payload_with_crc = dewhiten(whitened_payload_with_crc, whitener_offset)
else:
payload_with_crc = (whitened_payload_with_crc)
ok, payload = crc.check_crc32(payload_with_crc)
if 0:
print "payload_with_crc =", string_to_hex_list(payload_with_crc)
print "ok = %r, len(payload) = %d" % (ok, len(payload))
print "payload =", string_to_hex_list(payload)
return ok, payload
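# Illustrative sketch (not part of the original module): a typical round trip
# through make_packet()/unmake_packet(). The parameter values are arbitrary
# examples, not requirements of this module.
#
#   >>> pkt = make_packet('hello world', samples_per_symbol=2, bits_per_symbol=1)
#   >>> # ...modulate, transmit, receive, correlate on the access code, then
#   >>> # strip preamble/access code/header to recover the payload+CRC bytes:
#   >>> ok, payload = unmake_packet(whitened_payload_with_crc)
#   >>> ok, payload
#   (True, 'hello world')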
# FYI, this PN code is the output of a 15-bit LFSR
random_mask_tuple = (
255, 63, 0, 16, 0, 12, 0, 5, 192, 3, 16, 1, 204, 0, 85, 192,
63, 16, 16, 12, 12, 5, 197, 195, 19, 17, 205, 204, 85, 149, 255, 47,
0, 28, 0, 9, 192, 6, 208, 2, 220, 1, 153, 192, 106, 208, 47, 28,
28, 9, 201, 198, 214, 210, 222, 221, 152, 89, 170, 186, 255, 51, 0, 21,
192, 15, 16, 4, 12, 3, 69, 193, 243, 16, 69, 204, 51, 21, 213, 207,
31, 20, 8, 15, 70, 132, 50, 227, 85, 137, 255, 38, 192, 26, 208, 11,
28, 7, 73, 194, 182, 209, 182, 220, 118, 217, 230, 218, 202, 219, 23, 27,
78, 139, 116, 103, 103, 106, 170, 175, 63, 60, 16, 17, 204, 12, 85, 197,
255, 19, 0, 13, 192, 5, 144, 3, 44, 1, 221, 192, 89, 144, 58, 236,
19, 13, 205, 197, 149, 147, 47, 45, 220, 29, 153, 201, 170, 214, 255, 30,
192, 8, 80, 6, 188, 2, 241, 193, 132, 80, 99, 124, 41, 225, 222, 200,
88, 86, 186, 190, 243, 48, 69, 212, 51, 31, 85, 200, 63, 22, 144, 14,
236, 4, 77, 195, 117, 145, 231, 44, 74, 157, 247, 41, 134, 158, 226, 232,
73, 142, 182, 228, 118, 203, 102, 215, 106, 222, 175, 24, 124, 10, 161, 199,
56, 82, 146, 189, 173, 177, 189, 180, 113, 183, 100, 118, 171, 102, 255, 106,
192, 47, 16, 28, 12, 9, 197, 198, 211, 18, 221, 205, 153, 149, 170, 239,
63, 12, 16, 5, 204, 3, 21, 193, 207, 16, 84, 12, 63, 69, 208, 51,
28, 21, 201, 207, 22, 212, 14, 223, 68, 88, 51, 122, 149, 227, 47, 9,
220, 6, 217, 194, 218, 209, 155, 28, 107, 73, 239, 118, 204, 38, 213, 218,
223, 27, 24, 11, 74, 135, 119, 34, 166, 153, 186, 234, 243, 15, 5, 196,
3, 19, 65, 205, 240, 85, 132, 63, 35, 80, 25, 252, 10, 193, 199, 16,
82, 140, 61, 165, 209, 187, 28, 115, 73, 229, 246, 203, 6, 215, 66, 222,
177, 152, 116, 106, 167, 111, 58, 172, 19, 61, 205, 209, 149, 156, 111, 41,
236, 30, 205, 200, 85, 150, 191, 46, 240, 28, 68, 9, 243, 70, 197, 242,
211, 5, 157, 195, 41, 145, 222, 236, 88, 77, 250, 181, 131, 55, 33, 214,
152, 94, 234, 184, 79, 50, 180, 21, 183, 79, 54, 180, 22, 247, 78, 198,
180, 82, 247, 125, 134, 161, 162, 248, 121, 130, 162, 225, 185, 136, 114, 230,
165, 138, 251, 39, 3, 90, 129, 251, 32, 67, 88, 49, 250, 148, 67, 47,
113, 220, 36, 89, 219, 122, 219, 99, 27, 105, 203, 110, 215, 108, 94, 173,
248, 125, 130, 161, 161, 184, 120, 114, 162, 165, 185, 187, 50, 243, 85, 133,
255, 35, 0, 25, 192, 10, 208, 7, 28, 2, 137, 193, 166, 208, 122, 220,
35, 25, 217, 202, 218, 215, 27, 30, 139, 72, 103, 118, 170, 166, 255, 58,
192, 19, 16, 13, 204, 5, 149, 195, 47, 17, 220, 12, 89, 197, 250, 211,
3, 29, 193, 201, 144, 86, 236, 62, 205, 208, 85, 156, 63, 41, 208, 30,
220, 8, 89, 198, 186, 210, 243, 29, 133, 201, 163, 22, 249, 206, 194, 212,
81, 159, 124, 104, 33, 238, 152, 76, 106, 181, 239, 55, 12, 22, 133, 206,
227, 20, 73, 207, 118, 212, 38, 223, 90, 216, 59, 26, 147, 75, 45, 247,
93, 134, 185, 162, 242, 249, 133, 130, 227, 33, 137, 216, 102, 218, 170, 219,
63, 27, 80, 11, 124, 7, 97, 194, 168, 81, 190, 188, 112, 113, 228, 36,
75, 91, 119, 123, 102, 163, 106, 249, 239, 2, 204, 1, 149, 192, 111, 16,
44, 12, 29, 197, 201, 147, 22, 237, 206, 205, 148, 85, 175, 127, 60, 32,
17, 216, 12, 90, 133, 251, 35, 3, 89, 193, 250, 208, 67, 28, 49, 201,
212, 86, 223, 126, 216, 32, 90, 152, 59, 42, 147, 95, 45, 248, 29, 130,
137, 161, 166, 248, 122, 194, 163, 17, 185, 204, 114, 213, 229, 159, 11, 40,
7, 94, 130, 184, 97, 178, 168, 117, 190, 167, 48, 122, 148, 35, 47, 89,
220, 58, 217, 211, 26, 221, 203, 25, 151, 74, 238, 183, 12, 118, 133, 230,
227, 10, 201, 199, 22, 210, 142, 221, 164, 89, 187, 122, 243, 99, 5, 233,
195, 14, 209, 196, 92, 83, 121, 253, 226, 193, 137, 144, 102, 236, 42, 205,
223, 21, 152, 15, 42, 132, 31, 35, 72, 25, 246, 138, 198, 231, 18, 202,
141, 151, 37, 174, 155, 60, 107, 81, 239, 124, 76, 33, 245, 216, 71, 26,
178, 139, 53, 167, 87, 58, 190, 147, 48, 109, 212, 45, 159, 93, 168, 57,
190, 146, 240, 109, 132, 45, 163, 93, 185, 249, 178, 194, 245, 145, 135, 44,
98, 157, 233, 169, 142, 254, 228, 64, 75, 112, 55, 100, 22, 171, 78, 255,
116, 64, 39, 112, 26, 164, 11, 59, 71, 83, 114, 189, 229, 177, 139, 52,
103, 87, 106, 190, 175, 48, 124, 20, 33, 207, 88, 84, 58, 191, 83, 48,
61, 212, 17, 159, 76, 104, 53, 238, 151, 12, 110, 133, 236, 99, 13, 233,
197, 142, 211, 36, 93, 219, 121, 155, 98, 235, 105, 143, 110, 228, 44, 75,
93, 247, 121, 134, 162, 226, 249, 137, 130, 230, 225, 138, 200, 103, 22, 170,
142, 255, 36, 64, 27, 112, 11, 100, 7, 107, 66, 175, 113, 188, 36, 113,
219, 100, 91, 107, 123, 111, 99, 108, 41, 237, 222, 205, 152, 85, 170, 191,
63, 48, 16, 20, 12, 15, 69, 196, 51, 19, 85, 205, 255, 21, 128, 15,
32, 4, 24, 3, 74, 129, 247, 32, 70, 152, 50, 234, 149, 143, 47, 36,
28, 27, 73, 203, 118, 215, 102, 222, 170, 216, 127, 26, 160, 11, 56, 7,
82, 130, 189, 161, 177, 184, 116, 114, 167, 101, 186, 171, 51, 63, 85, 208,
63, 28, 16, 9, 204, 6, 213, 194, 223, 17, 152, 12, 106, 133, 239, 35,
12, 25, 197, 202, 211, 23, 29, 206, 137, 148, 102, 239, 106, 204, 47, 21,
220, 15, 25, 196, 10, 211, 71, 29, 242, 137, 133, 166, 227, 58, 201, 211,
22, 221, 206, 217, 148, 90, 239, 123, 12, 35, 69, 217, 243, 26, 197, 203,
19, 23, 77, 206, 181, 148, 119, 47, 102, 156, 42, 233, 223, 14, 216, 4,
90, 131, 123, 33, 227, 88, 73, 250, 182, 195, 54, 209, 214, 220, 94, 217,
248, 90, 194, 187, 17, 179, 76, 117, 245, 231, 7, 10, 130, 135, 33, 162,
152, 121, 170, 162, 255, 57, 128, 18, 224, 13, 136, 5, 166, 131, 58, 225,
211, 8, 93, 198, 185, 146, 242, 237, 133, 141, 163, 37, 185, 219, 50, 219,
85, 155, 127, 43, 96, 31, 104, 8, 46, 134, 156, 98, 233, 233, 142, 206,
228, 84, 75, 127, 119, 96, 38, 168, 26, 254, 139, 0, 103, 64, 42, 176,
31, 52, 8, 23, 70, 142, 178, 228, 117, 139, 103, 39, 106, 154, 175, 43,
60, 31, 81, 200, 60, 86, 145, 254, 236, 64, 77, 240, 53, 132, 23, 35,
78, 153, 244, 106, 199, 111, 18, 172, 13, 189, 197, 177, 147, 52, 109, 215,
109, 158, 173, 168, 125, 190, 161, 176, 120, 116, 34, 167, 89, 186, 186, 243,
51, 5, 213, 195, 31, 17, 200, 12, 86, 133, 254, 227, 0, 73, 192, 54,
208, 22, 220, 14, 217, 196, 90, 211, 123, 29, 227, 73, 137, 246, 230, 198,
202, 210, 215, 29, 158, 137, 168, 102, 254, 170, 192, 127, 16, 32, 12, 24,
5, 202, 131, 23, 33, 206, 152, 84, 106, 191, 111, 48, 44, 20, 29, 207,
73, 148, 54, 239, 86, 204, 62, 213, 208, 95, 28, 56, 9, 210, 134, 221,
162, 217, 185, 154, 242, 235, 5, 143, 67, 36, 49, 219, 84, 91, 127, 123,
96, 35, 104, 25, 238, 138, 204, 103, 21, 234, 143, 15, 36, 4, 27, 67,
75, 113, 247, 100, 70, 171, 114, 255, 101, 128, 43, 32, 31, 88, 8, 58,
134, 147, 34, 237, 217, 141, 154, 229, 171, 11, 63, 71, 80, 50, 188, 21,
177, 207, 52, 84, 23, 127, 78, 160, 52, 120, 23, 98, 142, 169, 164, 126,
251, 96, 67, 104, 49, 238, 148, 76, 111, 117, 236, 39, 13, 218, 133, 155,
35, 43, 89, 223, 122, 216, 35, 26, 153, 203, 42, 215, 95, 30, 184, 8,
114, 134, 165, 162, 251, 57, 131, 82, 225, 253, 136, 65, 166, 176, 122, 244,
35, 7, 89, 194, 186, 209, 179, 28, 117, 201, 231, 22, 202, 142, 215, 36,
94, 155, 120, 107, 98, 175, 105, 188, 46, 241, 220, 68, 89, 243, 122, 197,
227, 19, 9, 205, 198, 213, 146, 223, 45, 152, 29, 170, 137, 191, 38, 240,
26, 196, 11, 19, 71, 77, 242, 181, 133, 183, 35, 54, 153, 214, 234, 222,
207, 24, 84, 10, 191, 71, 48, 50, 148, 21, 175, 79, 60, 52, 17, 215,
76, 94, 181, 248, 119, 2, 166, 129, 186, 224, 115, 8, 37, 198, 155, 18,
235, 77, 143, 117, 164, 39, 59, 90, 147, 123, 45, 227, 93, 137, 249, 166,
194, 250, 209, 131, 28, 97, 201, 232, 86, 206, 190, 212, 112, 95, 100, 56,
43, 82, 159, 125, 168, 33, 190, 152, 112, 106, 164, 47, 59, 92, 19, 121,
205, 226, 213, 137, 159, 38, 232, 26, 206, 139, 20, 103, 79, 106, 180, 47,
55, 92, 22, 185, 206, 242, 212, 69, 159, 115, 40, 37, 222, 155, 24, 107,
74, 175, 119, 60, 38, 145, 218, 236, 91, 13, 251, 69, 131, 115, 33, 229,
216, 75, 26, 183, 75, 54, 183, 86, 246, 190, 198, 240, 82, 196, 61, 147,
81, 173, 252, 125, 129, 225, 160, 72, 120, 54, 162, 150, 249, 174, 194, 252,
81, 129, 252, 96, 65, 232, 48, 78, 148, 52, 111, 87, 108, 62, 173, 208,
125, 156, 33, 169, 216, 126, 218, 160, 91, 56, 59, 82, 147, 125, 173, 225,
189, 136, 113, 166, 164, 122, 251, 99, 3, 105, 193, 238, 208, 76, 92, 53,
249, 215, 2, 222, 129, 152, 96, 106, 168, 47, 62, 156, 16, 105, 204, 46,
213, 220, 95, 25, 248, 10, 194, 135, 17, 162, 140, 121, 165, 226, 251, 9,
131, 70, 225, 242, 200, 69, 150, 179, 46, 245, 220, 71, 25, 242, 138, 197,
167, 19, 58, 141, 211, 37, 157, 219, 41, 155, 94, 235, 120, 79, 98, 180,
41, 183, 94, 246, 184, 70, 242, 178, 197, 181, 147, 55, 45, 214, 157, 158,
233, 168, 78, 254, 180, 64, 119, 112, 38, 164, 26, 251, 75, 3, 119, 65,
230, 176, 74, 244, 55, 7, 86, 130, 190, 225, 176, 72, 116, 54, 167, 86,
250, 190, 195, 48, 81, 212, 60, 95, 81, 248, 60, 66, 145, 241, 172, 68,
125, 243, 97, 133, 232, 99, 14, 169, 196, 126, 211, 96, 93, 232, 57, 142,
146, 228, 109, 139, 109, 167, 109, 186, 173, 179, 61, 181, 209, 183, 28, 118,
137, 230, 230, 202, 202, 215, 23, 30, 142, 136, 100, 102, 171, 106, 255, 111,
0, 44, 0, 29, 192, 9, 144, 6, 236, 2, 205, 193, 149, 144, 111, 44,
44, 29, 221, 201, 153, 150, 234, 238, 207, 12, 84, 5, 255, 67, 0, 49,
192, 20, 80, 15, 124, 4, 33, 195, 88, 81, 250, 188, 67, 49, 241, 212,
68, 95, 115, 120, 37, 226, 155, 9, 171, 70, 255, 114, 192, 37, 144, 27,
44, 11, 93, 199, 121, 146, 162, 237, 185, 141, 178, 229, 181, 139, 55, 39,
86, 154, 190, 235, 48, 79, 84, 52, 63, 87, 80, 62, 188, 16, 113, 204,
36, 85, 219, 127, 27, 96, 11, 104, 7, 110, 130, 172, 97, 189, 232, 113,
142, 164, 100, 123, 107, 99, 111, 105, 236, 46, 205, 220, 85, 153, 255, 42,
192, 31, 16, 8, 12, 6, 133, 194, 227, 17, 137, 204, 102, 213, 234, 223,
15, 24, 4, 10, 131, 71, 33, 242, 152, 69, 170, 179, 63, 53, 208, 23,
28, 14, 137, 196, 102, 211, 106, 221, 239, 25, 140, 10, 229, 199, 11, 18,
135, 77, 162, 181, 185, 183, 50, 246, 149, 134, 239, 34, 204, 25, 149, 202,
239, 23, 12, 14, 133, 196, 99, 19, 105, 205, 238, 213, 140, 95, 37, 248,
27, 2, 139, 65, 167, 112, 122, 164, 35, 59, 89, 211, 122, 221, 227, 25,
137, 202, 230, 215, 10, 222, 135, 24, 98, 138, 169, 167, 62, 250, 144, 67,
44, 49, 221, 212, 89, 159, 122, 232, 35, 14, 153, 196, 106, 211, 111, 29,
236, 9, 141, 198, 229, 146, 203, 45, 151, 93, 174, 185, 188, 114, 241, 229,
132, 75, 35, 119, 89, 230, 186, 202, 243, 23, 5, 206, 131, 20, 97, 207,
104, 84, 46, 191, 92, 112, 57, 228, 18, 203, 77, 151, 117, 174, 167, 60,
122, 145, 227, 44, 73, 221, 246, 217, 134, 218, 226, 219, 9, 155, 70, 235,
114, 207, 101, 148, 43, 47, 95, 92, 56, 57, 210, 146, 221, 173, 153, 189,
170, 241, 191, 4, 112, 3, 100, 1, 235, 64, 79, 112, 52, 36, 23, 91,
78, 187, 116, 115, 103, 101, 234, 171, 15, 63, 68, 16, 51, 76, 21, 245,
207, 7, 20, 2, 143, 65, 164, 48, 123, 84, 35, 127, 89, 224, 58, 200,
19, 22, 141, 206, 229, 148, 75, 47, 119, 92, 38, 185, 218, 242, 219, 5,
155, 67, 43, 113, 223, 100, 88, 43, 122, 159, 99, 40, 41, 222, 158, 216,
104, 90, 174, 187, 60, 115, 81, 229, 252, 75, 1, 247, 64, 70, 176, 50,
244, 21, 135, 79, 34, 180, 25, 183, 74, 246, 183, 6, 246, 130, 198, 225,
146, 200, 109, 150, 173, 174, 253, 188, 65, 177, 240, 116, 68, 39, 115, 90,
165, 251, 59, 3, 83, 65, 253, 240, 65, 132, 48, 99, 84, 41, 255, 94,
192, 56, 80, 18, 188, 13, 177, 197, 180, 83, 55, 125, 214, 161, 158, 248,
104, 66, 174, 177, 188, 116, 113, 231, 100, 74, 171, 119, 63, 102, 144, 42,
236, 31, 13, 200, 5, 150, 131, 46, 225, 220, 72, 89, 246, 186, 198, 243,
18, 197, 205, 147, 21, 173, 207, 61, 148, 17, 175, 76, 124, 53, 225, 215,
8, 94, 134, 184, 98, 242, 169, 133, 190, 227, 48, 73, 212, 54, 223, 86,
216, 62, 218, 144, 91, 44, 59, 93, 211, 121, 157, 226, 233, 137, 142, 230,
228, 74, 203, 119, 23, 102, 142, 170, 228, 127, 11, 96, 7, 104, 2, 174,
129, 188, 96, 113, 232, 36, 78, 155, 116, 107, 103, 111, 106, 172, 47, 61,
220, 17, 153, 204, 106, 213, 239, 31, 12, 8, 5, 198, 131, 18, 225, 205,
136, 85, 166, 191, 58, 240, 19, 4, 13, 195, 69, 145, 243, 44, 69, 221,
243, 25, 133, 202, 227, 23, 9, 206, 134, 212, 98, 223, 105, 152, 46, 234,
156, 79, 41, 244, 30, 199, 72, 82, 182, 189, 182, 241, 182, 196, 118, 211,
102, 221, 234, 217, 143, 26, 228, 11, 11, 71, 71, 114, 178, 165, 181, 187,
55, 51, 86, 149, 254, 239, 0, 76, 0, 53, 192, 23, 16, 14, 140, 4,
101, 195, 107, 17, 239, 76, 76, 53, 245, 215, 7, 30, 130, 136, 97, 166,
168, 122, 254, 163, 0, 121, 192, 34, 208, 25, 156, 10, 233, 199, 14, 210,
132, 93, 163, 121, 185, 226, 242, 201, 133, 150, 227, 46, 201, 220, 86, 217,
254, 218, 192, 91, 16, 59, 76, 19, 117, 205, 231, 21, 138, 143, 39, 36,
26, 155, 75, 43, 119, 95, 102, 184, 42, 242, 159, 5, 168, 3, 62, 129,
208, 96, 92, 40, 57, 222, 146, 216, 109, 154, 173, 171, 61, 191, 81, 176,
60, 116, 17, 231, 76, 74, 181, 247, 55, 6, 150, 130, 238, 225, 140, 72,
101, 246, 171, 6, 255, 66, 192, 49, 144, 20, 108, 15, 109, 196, 45, 147,
93, 173, 249, 189, 130, 241, 161, 132, 120, 99, 98, 169, 233, 190, 206, 240,
84, 68, 63, 115, 80, 37, 252, 27, 1, 203, 64, 87, 112, 62, 164, 16,
123, 76, 35, 117, 217, 231, 26, 202, 139, 23, 39, 78, 154, 180, 107, 55,
111, 86, 172, 62, 253, 208, 65, 156, 48, 105, 212, 46, 223, 92, 88, 57,
250, 146, 195, 45, 145, 221, 172, 89, 189, 250, 241, 131, 4, 97, 195, 104,
81, 238, 188, 76, 113, 245, 228, 71, 11, 114, 135, 101, 162, 171, 57, 191,
82, 240, 61, 132, 17, 163, 76, 121, 245, 226, 199, 9, 146, 134, 237, 162,
205, 185, 149, 178, 239, 53, 140, 23, 37, 206, 155, 20, 107, 79, 111, 116,
44, 39, 93, 218, 185, 155, 50, 235, 85, 143, 127, 36, 32, 27, 88, 11,
122, 135, 99, 34, 169, 217, 190, 218, 240, 91, 4, 59, 67, 83, 113, 253,
228, 65, 139, 112, 103, 100, 42, 171, 95, 63, 120, 16, 34, 140, 25, 165,
202, 251, 23, 3, 78, 129, 244, 96, 71, 104, 50, 174, 149, 188, 111, 49,
236, 20, 77, 207, 117, 148, 39, 47, 90, 156, 59, 41, 211, 94, 221, 248,
89, 130, 186, 225, 179, 8, 117, 198, 167, 18, 250, 141, 131, 37, 161, 219,
56, 91, 82, 187, 125, 179, 97, 181, 232, 119, 14, 166, 132, 122, 227, 99,
9, 233, 198, 206, 210, 212, 93, 159, 121, 168, 34, 254, 153, 128, 106, 224,
47, 8, 28, 6, 137, 194, 230, 209, 138, 220, 103, 25, 234, 138, 207, 39,
20, 26, 143, 75, 36, 55, 91, 86, 187, 126, 243, 96, 69, 232, 51, 14,
149, 196, 111, 19, 108, 13, 237, 197, 141, 147, 37, 173, 219, 61, 155, 81,
171, 124, 127, 97, 224, 40, 72, 30, 182, 136, 118, 230, 166, 202, 250, 215,
3, 30, 129, 200, 96, 86, 168, 62, 254, 144, 64, 108, 48, 45, 212, 29,
159, 73, 168, 54, 254, 150, 192, 110, 208, 44, 92, 29, 249, 201, 130, 214,
225, 158, 200, 104, 86, 174, 190, 252, 112, 65, 228, 48, 75, 84, 55, 127,
86, 160, 62, 248, 16, 66, 140, 49, 165, 212, 123, 31, 99, 72, 41, 246,
158, 198, 232, 82, 206, 189, 148, 113, 175, 100, 124, 43, 97, 223, 104, 88,
46, 186, 156, 115, 41, 229, 222, 203, 24, 87, 74, 190, 183, 48, 118, 148,
38, 239, 90, 204, 59, 21, 211, 79, 29, 244, 9, 135, 70, 226, 178, 201,
181, 150, 247, 46, 198, 156, 82, 233, 253, 142, 193, 164, 80, 123, 124, 35,
97, 217, 232, 90, 206, 187, 20, 115, 79, 101, 244, 43, 7, 95, 66, 184,
49, 178, 148, 117, 175, 103, 60, 42, 145, 223, 44, 88, 29, 250, 137, 131,
38, 225, 218, 200, 91, 22, 187, 78, 243, 116, 69, 231, 115, 10, 165, 199,
59, 18, 147, 77, 173, 245, 189, 135, 49, 162, 148, 121, 175, 98, 252, 41,
129, 222, 224, 88, 72, 58, 182, 147, 54, 237, 214, 205, 158, 213, 168, 95,
62, 184, 16, 114, 140, 37, 165, 219, 59, 27, 83, 75, 125, 247, 97, 134,
168, 98, 254, 169, 128, 126, 224, 32, 72, 24, 54, 138, 150, 231, 46, 202,
156, 87, 41, 254, 158, 192, 104, 80, 46, 188, 28, 113, 201, 228, 86, 203,
126, 215, 96, 94, 168, 56, 126, 146, 160, 109, 184, 45, 178, 157, 181, 169,
183, 62, 246, 144, 70, 236, 50, 205, 213, 149, 159, 47, 40, 28, 30, 137,
200, 102, 214, 170, 222, 255, 24, 64, 10, 176, 7, 52, 2, 151, 65, 174,
176, 124, 116, 33, 231, 88, 74, 186, 183, 51, 54, 149, 214, 239, 30, 204,
8, 85, 198, 191, 18, 240, 13, 132, 5, 163, 67, 57, 241, 210, 196, 93,
147, 121, 173, 226, 253, 137, 129, 166, 224, 122, 200, 35, 22, 153, 206, 234,
212, 79, 31, 116, 8, 39, 70, 154, 178, 235, 53, 143, 87, 36, 62, 155,
80, 107, 124, 47, 97, 220, 40, 89, 222, 186, 216, 115, 26, 165, 203, 59,
23, 83, 78, 189, 244, 113, 135, 100, 98, 171, 105, 191, 110, 240, 44, 68,
29, 243, 73, 133, 246, 227, 6, 201, 194, 214, 209, 158, 220, 104, 89, 238,
186, 204, 115, 21, 229, 207, 11, 20, 7, 79, 66, 180, 49, 183, 84, 118,
191, 102, 240, 42, 196, 31, 19, 72, 13, 246, 133, 134, 227, 34, 201, 217,
150, 218, 238, 219, 12, 91, 69, 251, 115, 3, 101, 193, 235, 16, 79, 76,
52, 53, 215, 87, 30, 190, 136, 112, 102, 164, 42, 251, 95, 3, 120, 1,
226, 128, 73, 160, 54, 248, 22, 194, 142, 209, 164, 92, 123, 121, 227, 98,
201, 233, 150, 206, 238, 212, 76, 95, 117, 248, 39, 2, 154, 129, 171, 32,
127, 88, 32, 58, 152, 19, 42, 141, 223, 37, 152, 27, 42, 139, 95, 39,
120, 26, 162, 139, 57, 167, 82, 250, 189, 131, 49, 161, 212, 120, 95, 98,
184, 41, 178, 158, 245, 168, 71, 62, 178, 144, 117, 172, 39, 61, 218, 145,
155, 44, 107, 93, 239, 121, 140, 34, 229, 217, 139, 26, 231, 75, 10, 183,
71, 54, 178, 150, 245, 174, 199, 60, 82, 145, 253, 172, 65, 189, 240, 113,
132, 36, 99, 91, 105, 251, 110, 195, 108, 81, 237, 252, 77, 129, 245, 160,
71, 56, 50, 146, 149, 173, 175, 61, 188, 17, 177, 204, 116, 85, 231, 127,
10, 160, 7, 56, 2, 146, 129, 173, 160, 125, 184, 33, 178, 152, 117, 170,
167, 63, 58, 144, 19, 44, 13, 221, 197, 153, 147, 42, 237, 223, 13, 152,
5, 170, 131, 63, 33, 208, 24, 92, 10, 185, 199, 50, 210, 149, 157, 175,
41, 188, 30, 241, 200, 68, 86, 179, 126, 245, 224, 71, 8, 50, 134, 149,
162, 239, 57, 140, 18, 229, 205, 139, 21, 167, 79, 58, 180, 19, 55, 77,
214, 181, 158, 247, 40, 70, 158, 178, 232, 117, 142, 167, 36, 122, 155, 99,
43, 105, 223, 110, 216, 44, 90, 157, 251, 41, 131, 94, 225, 248, 72, 66,
182, 177, 182, 244, 118, 199, 102, 210, 170, 221, 191, 25, 176, 10, 244, 7,
7, 66, 130, 177, 161, 180, 120, 119, 98, 166, 169, 186, 254, 243, 0, 69,
192, 51, 16, 21, 204, 15, 21, 196, 15, 19, 68, 13, 243, 69, 133, 243,
35, 5, 217, 195, 26, 209, 203, 28, 87, 73, 254, 182, 192, 118, 208, 38,
220, 26, 217, 203, 26, 215, 75, 30, 183, 72, 118, 182, 166, 246, 250, 198,
195, 18, 209, 205, 156, 85, 169, 255, 62, 192, 16, 80, 12, 60, 5, 209,
195, 28, 81, 201, 252, 86, 193, 254, 208, 64, 92, 48, 57, 212, 18, 223,
77, 152, 53, 170, 151, 63, 46, 144, 28, 108, 9, 237, 198, 205, 146, 213,
173, 159, 61, 168, 17, 190, 140, 112, 101, 228, 43, 11, 95, 71, 120, 50,
162, 149, 185, 175, 50, 252, 21, 129, 207, 32, 84, 24, 63, 74, 144, 55,
44, 22, 157, 206, 233, 148, 78, 239, 116, 76, 39, 117, 218, 167, 27, 58,
139, 83, 39, 125, 218, 161, 155, 56, 107, 82, 175, 125, 188, 33, 177, 216,
116, 90, 167, 123, 58, 163, 83, 57, 253, 210, 193, 157, 144, 105, 172, 46,
253, 220, 65, 153, 240, 106, 196, 47, 19, 92, 13, 249, 197, 130, 211, 33,
157, 216, 105, 154, 174, 235, 60, 79, 81, 244, 60, 71, 81, 242, 188, 69,
177, 243, 52, 69, 215, 115, 30, 165, 200, 123, 22, 163, 78, 249, 244, 66,
199, 113, 146, 164, 109, 187, 109, 179, 109, 181, 237, 183, 13, 182, 133, 182,
227, 54, 201, 214, 214, 222, 222, 216, 88, 90, 186, 187, 51, 51, 255, 63 )
random_mask_vec8 = numpy.array(random_mask_tuple, numpy.uint8)
| gpl-3.0 | 3,412,586,387,116,554,000 | 59.814815 | 106 | 0.526689 | false |
aquavitae/rst2pdf | gui/Ui_stylesheets.py | 12 | 5325 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'stylesheets.ui'
#
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(619, 451)
self.horizontalLayout_2 = QtGui.QHBoxLayout(Form)
self.horizontalLayout_2.setMargin(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_2 = QtGui.QLabel(Form)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.system = QtGui.QListWidget(Form)
self.system.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.system.setTextElideMode(QtCore.Qt.ElideMiddle)
self.system.setObjectName("system")
self.verticalLayout_2.addWidget(self.system)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.addFromSystem = QtGui.QToolButton(Form)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/next.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.addFromSystem.setIcon(icon)
self.addFromSystem.setObjectName("addFromSystem")
self.horizontalLayout_2.addWidget(self.addFromSystem)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.addFromFile = QtGui.QToolButton(Form)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/icons/new.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.addFromFile.setIcon(icon1)
self.addFromFile.setObjectName("addFromFile")
self.horizontalLayout.addWidget(self.addFromFile)
self.remove = QtGui.QToolButton(Form)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/close.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.remove.setIcon(icon2)
self.remove.setObjectName("remove")
self.horizontalLayout.addWidget(self.remove)
self.up = QtGui.QToolButton(Form)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/icons/up.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.up.setIcon(icon3)
self.up.setObjectName("up")
self.horizontalLayout.addWidget(self.up)
self.down = QtGui.QToolButton(Form)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/icons/down.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.down.setIcon(icon4)
self.down.setObjectName("down")
self.horizontalLayout.addWidget(self.down)
self.verticalLayout.addLayout(self.horizontalLayout)
self.custom = QtGui.QListWidget(Form)
self.custom.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.custom.setTextElideMode(QtCore.Qt.ElideMiddle)
self.custom.setObjectName("custom")
self.verticalLayout.addWidget(self.custom)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(Form)
QtCore.QObject.connect(self.system, QtCore.SIGNAL("itemSelectionChanged()"), Form.applyChanges)
QtCore.QObject.connect(self.custom, QtCore.SIGNAL("itemSelectionChanged()"), Form.applyChanges)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "System StyleSheets:", None, QtGui.QApplication.UnicodeUTF8))
self.addFromSystem.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Custom StyleSheets:", None, QtGui.QApplication.UnicodeUTF8))
self.addFromFile.setToolTip(QtGui.QApplication.translate("Form", "Add another stylesheet", None, QtGui.QApplication.UnicodeUTF8))
self.addFromFile.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
self.remove.setToolTip(QtGui.QApplication.translate("Form", "Remove selected stylesheet", None, QtGui.QApplication.UnicodeUTF8))
self.remove.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
self.up.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
self.down.setText(QtGui.QApplication.translate("Form", "...", None, QtGui.QApplication.UnicodeUTF8))
import icons_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| mit | 6,564,326,961,079,292,000 | 50.201923 | 137 | 0.700657 | false |
noahchense/esptool | esptool.py | 6 | 27179 | #!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import math
import time
import argparse
import os
import subprocess
import tempfile
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
    # Maximum block sizes for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x400
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port = 0, baud = ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length = 1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise Exception('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0'+(packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc'))+'\xc0'
self._port.write(buf)
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state = ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
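    # Worked example (illustrative, not part of the original source): the
    # checksum is a plain XOR over the data bytes seeded with 0xEF, so
    # ESPROM.checksum('\x10\x20') == 0xef ^ 0x10 ^ 0x20 == 0xdf.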
""" Send a request and read the response """
def command(self, op = None, data = None, chk = 0):
if op:
# Construct and send request
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise Exception('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01 or (op and op_ret != op):
raise Exception('Invalid response')
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise Exception('Invalid end of packet')
return val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20'+32*'\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# issue reset-to-bootloader:
# RTS = either CH_PD or nRESET (both active low = chip in reset)
# DTR = GPIO0 (active low = boot to flasher)
self._port.setDTR(False)
self._port.setRTS(True)
time.sleep(0.05)
self._port.setDTR(True)
self._port.setRTS(False)
time.sleep(0.05)
self._port.setDTR(False)
self._port.timeout = 0.3 # worst-case latency timer should be 255ms (probably <20ms)
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise Exception('Failed to connect')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise Exception('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us = 0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise Exception('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
raise Exception('Failed to enter RAM download mode')
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
if self.command(ESPROM.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0)+data, ESPROM.checksum(data))[1] != "\0\0":
raise Exception('Failed to write to target RAM')
""" Leave download mode and run the application """
def mem_finish(self, entrypoint = 0):
if self.command(ESPROM.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
raise Exception('Failed to leave RAM download mode')
""" Start downloading to Flash (performs an erase) """
def flash_begin(self, size, offset):
old_tmo = self._port.timeout
num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK
sectors_per_block = 16
sector_size = 4096
num_sectors = (size + sector_size - 1) / sector_size
start_sector = offset / sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
erase_size = (num_sectors + 1) / 2 * sector_size
else:
erase_size = (num_sectors - head_sectors) * sector_size
self._port.timeout = 10
if self.command(ESPROM.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1] != "\0\0":
raise Exception('Failed to enter Flash download mode')
self._port.timeout = old_tmo
""" Write block to flash """
def flash_block(self, data, seq):
if self.command(ESPROM.ESP_FLASH_DATA,
struct.pack('<IIII', len(data), seq, 0, 0)+data, ESPROM.checksum(data))[1] != "\0\0":
raise Exception('Failed to write to target Flash')
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot = False):
pkt = struct.pack('<I', int(not reboot))
if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0":
raise Exception('Failed to leave Flash mode')
""" Run application code in flash """
def run(self, reboot = False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read MAC from OTP ROM """
def read_mac(self):
        mac0 = self.read_reg(self.ESP_OTP_MAC0)
        mac1 = self.read_reg(self.ESP_OTP_MAC1)
if ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise Exception("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
self.flash_begin(0, 0)
self.write_reg(0x60000240, 0x0, 0xffffffff)
self.write_reg(0x60000200, 0x10000000, 0xffffffff)
        flash_id = self.read_reg(0x60000240)
self.flash_finish(False)
return flash_id
""" Read SPI flash """
def flash_read(self, offset, size, count = 1):
# Create a custom stub
stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# Download stub
self.mem_begin(len(stub), 1, len(stub), 0x40100000)
self.mem_block(stub, 0)
self.mem_finish(0x4010001c)
# Fetch the data
data = ''
for _ in xrange(count):
if self._port.read(1) != '\xc0':
raise Exception('Invalid head of packet (sflash read)')
data += self.read(size)
if self._port.read(1) != chr(0xc0):
raise Exception('Invalid end of packet (sflash read)')
return data
""" Abuse the loader protocol to force flash to be left in write mode """
def flash_unlock_dio(self):
# Enable flash write mode
self.flash_begin(0, 0)
# Reset the chip rather than call flash_finish(), which would have
# write protected the chip again (why oh why does it do that?!)
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40000080)
""" Perform a chip erase of SPI flash """
def flash_erase(self):
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# This is hacky: we don't have a custom stub, instead we trick
# the bootloader to jump to the SPIEraseChip() routine and then halt/crash
# when it tries to boot an unconfigured system.
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40004984)
# Yup - there's no good way to detect if we succeeded.
        # It is, on the other hand, unlikely to fail.
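# Usage sketch (illustrative only; the device path is an assumption, not taken
# from this file):
#
#   esp = ESPROM('/dev/ttyUSB0', ESPROM.ESP_ROM_BAUD)
#   esp.connect()
#   print 'MAC: %s' % ':'.join('%02x' % b for b in esp.read_mac())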
class ESPFirmwareImage:
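    # On-disk image layout as handled by this class (derived from the code
    # below): an 8-byte '<BBBBI' header (magic 0xE9, segment count, flash_mode,
    # flash_size_freq, entry point), then one '<II' (offset, size) header plus
    # raw data per segment, padding up to a 16-byte boundary, and a single
    # checksum byte as the last byte of the file.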
def __init__(self, filename = None):
self.segments = []
self.entrypoint = 0
self.flash_mode = 0
self.flash_size_freq = 0
if filename is not None:
f = file(filename, 'rb')
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8))
# some sanity check
if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16:
raise Exception('Invalid firmware image')
for i in xrange(segments):
(offset, size) = struct.unpack('<II', f.read(8))
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
raise Exception('Suspicious segment 0x%x, length %d' % (offset, size))
segment_data = f.read(size)
if len(segment_data) < size:
raise Exception('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
self.segments.append((offset, size, segment_data))
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align = 15-(f.tell() % 16)
f.seek(align, 1)
self.checksum = ord(f.read(1))
def add_segment(self, addr, data):
# Data should be aligned on word boundary
l = len(data)
if l % 4:
data += b"\x00" * (4 - l % 4)
if l > 0:
self.segments.append((addr, len(data), data))
def save(self, filename):
f = file(filename, 'wb')
f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (offset, size, data) in self.segments:
f.write(struct.pack('<II', offset, size))
f.write(data)
checksum = ESPROM.checksum(data, checksum)
align = 15-(f.tell() % 16)
f.seek(align, 1)
f.write(struct.pack('B', checksum))
class ELFFile:
def __init__(self, name):
self.name = name
self.symbols = None
def _fetch_symbols(self):
if self.symbols is not None:
return
self.symbols = {}
try:
tool_nm = "xtensa-lx106-elf-nm"
if os.getenv('XTENSA_CORE')=='lx106':
tool_nm = "xt-nm"
proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling "+tool_nm+", do you have Xtensa toolchain in PATH?"
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
self.symbols[fields[2]] = int(fields[0], 16)
def get_symbol_addr(self, sym):
self._fetch_symbols()
return self.symbols[sym]
def get_entry_point(self):
tool_readelf = "xtensa-lx106-elf-readelf"
if os.getenv('XTENSA_CORE')=='lx106':
tool_readelf = "xt-readelf"
try:
proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling "+tool_readelf+", do you have Xtensa toolchain in PATH?"
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
if fields[0] == "Entry":
                return int(fields[3], 0)
def load_section(self, section):
tool_objcopy = "xtensa-lx106-elf-objcopy"
if os.getenv('XTENSA_CORE')=='lx106':
tool_objcopy = "xt-objcopy"
tmpsection = tempfile.mktemp(suffix=".section")
try:
subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection])
with open(tmpsection, "rb") as f:
data = f.read()
finally:
os.remove(tmpsection)
return data
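# Usage sketch (illustrative; 'app.elf' is a placeholder, and the Xtensa
# toolchain binaries must be on PATH for these calls to work):
#
#   e = ELFFile('app.elf')
#   entry = e.get_entry_point()
#   text = e.load_section('.text')
#   text_addr = e.get_symbol_addr('_text_start')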
def arg_auto_int(x):
return int(x, 0)
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
    equivalent result to int(math.ceil(float(int(a)) / float(int(b)))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) / int(b)
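# Example (illustrative): div_roundup(10, 4) == 3 and div_roundup(8, 4) == 2.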
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'ESP8266 ROM Bootloader Utility', prog = 'esptool')
parser.add_argument(
'--port', '-p',
help = 'Serial port device',
default = '/dev/ttyUSB0')
parser.add_argument(
'--baud', '-b',
help = 'Serial port baud rate',
type = arg_auto_int,
default = ESPROM.ESP_ROM_BAUD)
subparsers = parser.add_subparsers(
dest = 'operation',
help = 'Run esptool {command} -h for additional help')
parser_load_ram = subparsers.add_parser(
'load_ram',
help = 'Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help = 'Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help = 'Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help = 'Base address', type = arg_auto_int)
parser_dump_mem.add_argument('size', help = 'Size of region to dump', type = arg_auto_int)
parser_dump_mem.add_argument('filename', help = 'Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help = 'Read arbitrary memory location')
parser_read_mem.add_argument('address', help = 'Address to read', type = arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help = 'Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help = 'Address to write', type = arg_auto_int)
parser_write_mem.add_argument('value', help = 'Value', type = arg_auto_int)
parser_write_mem.add_argument('mask', help = 'Mask of bits to write', type = arg_auto_int)
parser_write_flash = subparsers.add_parser(
'write_flash',
help = 'Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', nargs = '+', help = 'Address and binary file to write there, separated by space')
parser_write_flash.add_argument('--flash_freq', '-ff', help = 'SPI Flash frequency',
choices = ['40m', '26m', '20m', '80m'], default = '40m')
parser_write_flash.add_argument('--flash_mode', '-fm', help = 'SPI Flash mode',
choices = ['qio', 'qout', 'dio', 'dout'], default = 'qio')
parser_write_flash.add_argument('--flash_size', '-fs', help = 'SPI Flash size in Mbit',
choices = ['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default = '4m')
parser_run = subparsers.add_parser(
'run',
help = 'Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help = 'Dump headers from an application image')
parser_image_info.add_argument('filename', help = 'Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help = 'Create an application image from binary files')
parser_make_image.add_argument('output', help = 'Output image file')
parser_make_image.add_argument('--segfile', '-f', action = 'append', help = 'Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action = 'append', help = 'Segment base address', type = arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help = 'Address of entry point', type = arg_auto_int, default = 0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help = 'Create an application image from ELF file')
parser_elf2image.add_argument('input', help = 'Input ELF file')
parser_elf2image.add_argument('--output', '-o', help = 'Output filename prefix', type = str)
parser_elf2image.add_argument('--flash_freq', '-ff', help = 'SPI Flash frequency',
choices = ['40m', '26m', '20m', '80m'], default = '40m')
parser_elf2image.add_argument('--flash_mode', '-fm', help = 'SPI Flash mode',
choices = ['qio', 'qout', 'dio', 'dout'], default = 'qio')
parser_elf2image.add_argument('--flash_size', '-fs', help = 'SPI Flash size in Mbit',
choices = ['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default = '4m')
parser_read_mac = subparsers.add_parser(
'read_mac',
help = 'Read MAC address from OTP ROM')
parser_flash_id = subparsers.add_parser(
'flash_id',
help = 'Read SPI flash manufacturer and device ID')
parser_read_flash = subparsers.add_parser(
'read_flash',
help = 'Read SPI flash content')
parser_read_flash.add_argument('address', help = 'Start address', type = arg_auto_int)
parser_read_flash.add_argument('size', help = 'Size of region to dump', type = arg_auto_int)
parser_read_flash.add_argument('filename', help = 'Name of binary dump')
parser_erase_flash = subparsers.add_parser(
'erase_flash',
help = 'Perform Chip Erase on SPI flash')
args = parser.parse_args()
# Create the ESPROM connection object, if needed
esp = None
if args.operation not in ('image_info','make_image','elf2image'):
esp = ESPROM(args.port, args.baud)
esp.connect()
# Do the actual work. Should probably be split into separate functions.
if args.operation == 'load_ram':
image = ESPFirmwareImage(args.filename)
print 'RAM boot...'
for (offset, size, data) in image.segments:
print 'Downloading %d bytes at %08x...' % (size, offset),
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset)
seq = 0
while len(data) > 0:
esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
data = data[esp.ESP_RAM_BLOCK:]
seq += 1
print 'done!'
print 'All segments done, executing at %08x' % image.entrypoint
esp.mem_finish(image.entrypoint)
elif args.operation == 'read_mem':
print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address))
elif args.operation == 'write_mem':
esp.write_reg(args.address, args.value, args.mask, 0)
print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address)
elif args.operation == 'dump_mem':
f = file(args.filename, 'wb')
for i in xrange(args.size/4):
d = esp.read_reg(args.address+(i*4))
f.write(struct.pack('<I', d))
if f.tell() % 1024 == 0:
print '\r%d bytes read... (%d %%)' % (f.tell(), f.tell()*100/args.size),
sys.stdout.flush()
print 'Done!'
elif args.operation == 'write_flash':
assert len(args.addr_filename) % 2 == 0
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
flash_info = struct.pack('BB', flash_mode, flash_size_freq)
while args.addr_filename:
address = int(args.addr_filename[0], 0)
filename = args.addr_filename[1]
args.addr_filename = args.addr_filename[2:]
image = file(filename, 'rb').read()
print 'Erasing flash...'
blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK)
esp.flash_begin(blocks*esp.ESP_FLASH_BLOCK, address)
seq = 0
written = 0
t = time.time()
while len(image) > 0:
print '\rWriting at 0x%08x... (%d %%)' % (address + seq*esp.ESP_FLASH_BLOCK, 100*(seq+1)/blocks),
sys.stdout.flush()
block = image[0:esp.ESP_FLASH_BLOCK]
# Fix sflash config data
if address == 0 and seq == 0 and block[0] == '\xe9':
block = block[0:2] + flash_info + block[4:]
# Pad the last block
block = block + '\xff' * (esp.ESP_FLASH_BLOCK-len(block))
esp.flash_block(block, seq)
image = image[esp.ESP_FLASH_BLOCK:]
seq += 1
written += len(block)
t = time.time() - t
print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000)
print '\nLeaving...'
if args.flash_mode == 'dio':
esp.flash_unlock_dio()
else:
esp.flash_begin(0, 0)
esp.flash_finish(False)
elif args.operation == 'run':
esp.run()
elif args.operation == 'image_info':
image = ESPFirmwareImage(args.filename)
print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set'
print '%d segments' % len(image.segments)
print
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (idx, (offset, size, data)) in enumerate(image.segments):
print 'Segment %d: %5d bytes at %08x' % (idx+1, size, offset)
checksum = ESPROM.checksum(data, checksum)
print
print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!')
elif args.operation == 'make_image':
image = ESPFirmwareImage()
if len(args.segfile) == 0:
raise Exception('No segments specified')
if len(args.segfile) != len(args.segaddr):
raise Exception('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = file(seg, 'rb').read()
image.add_segment(addr, data)
image.entrypoint = args.entrypoint
image.save(args.output)
elif args.operation == 'elf2image':
if args.output is None:
args.output = args.input + '-'
e = ELFFile(args.input)
image = ESPFirmwareImage()
image.entrypoint = e.get_entry_point()
for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")):
data = e.load_section(section)
image.add_segment(e.get_symbol_addr(start), data)
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
image.save(args.output + "0x00000.bin")
data = e.load_section(".irom0.text")
off = e.get_symbol_addr("_irom0_text_start") - 0x40200000
assert off >= 0
f = open(args.output + "0x%05x.bin" % off, "wb")
f.write(data)
f.close()
elif args.operation == 'read_mac':
mac = esp.read_mac()
print 'MAC: %s' % ':'.join(map(lambda x: '%02x'%x, mac))
elif args.operation == 'flash_id':
flash_id = esp.flash_id()
print 'Manufacturer: %02x' % (flash_id & 0xff)
print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff)
elif args.operation == 'read_flash':
print 'Please wait...'
file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size])
elif args.operation == 'erase_flash':
esp.flash_erase()
| gpl-2.0 | -2,963,818,540,755,623,000 | 39.265185 | 152 | 0.572795 | false |
jfietkau/Streets4MPI | simulation.py | 2 | 6805 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# simulation.py
# Copyright 2012 Julian Fietkau <http://www.julian-fietkau.de/>,
# Joachim Nitschke
#
# This file is part of Streets4MPI.
#
# Streets4MPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Streets4MPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Streets4MPI. If not, see <http://www.gnu.org/licenses/>.
#
from time import time
from math import sqrt
from operator import itemgetter
from array import array
from itertools import repeat
from osmdata import GraphBuilder
from streetnetwork import StreetNetwork
from settings import settings
# This class does the actual simulation steps
class Simulation(object):
def __init__(self, street_network, trips, jam_tolerance, log_callback):
self.street_network = street_network
self.trips = trips
self.jam_tolerance = jam_tolerance
self.log_callback = log_callback
self.step_counter = 0
self.traffic_load = array("I", repeat(0, self.street_network.street_index))
self.cumulative_traffic_load = None
def step(self):
self.step_counter += 1
self.log_callback("Preparing edges...")
# update driving time based on traffic load
for street, street_index, length, max_speed in self.street_network:
street_traffic_load = self.traffic_load[street_index]
# ideal speed is when the street is empty
ideal_speed = calculate_driving_speed(length, max_speed, 0)
            # actual speed may be less than that
actual_speed = calculate_driving_speed(length, max_speed, street_traffic_load)
# based on traffic jam tolerance the deceleration is weighted differently
perceived_speed = actual_speed + (ideal_speed - actual_speed) * self.jam_tolerance
driving_time = length / perceived_speed
self.street_network.set_driving_time(street, driving_time)
# reset traffic load
self.traffic_load = array("I", repeat(0, self.street_network.street_index))
origin_nr = 0
for origin in self.trips.keys():
# calculate all shortest paths from resident to every other node
origin_nr += 1
self.log_callback("Origin nr", str(origin_nr) + "...")
paths = self.street_network.calculate_shortest_paths(origin)
# increase traffic load
for goal in self.trips[origin]:
# is the goal even reachable at all? if not, ignore for now
if goal in paths:
# hop along the edges until we're there
current = goal
while current != origin:
street = (min(current, paths[current]), max(current, paths[current]))
current = paths[current]
usage = settings["trip_volume"]
street_index = self.street_network.get_street_index(street)
self.traffic_load[street_index] += usage
def road_construction(self):
dict_traffic_load = dict()
for i in range(0, len(self.cumulative_traffic_load)):
street = self.street_network.get_street_by_index(i)
dict_traffic_load[street] = self.cumulative_traffic_load[i]
sorted_traffic_load = sorted(dict_traffic_load.iteritems(), key = itemgetter(1))
max_decrease_index = 0.15 * len(sorted_traffic_load) # bottom 15%
min_increase_index = 0.95 * len(sorted_traffic_load) # top 5%
for i in range(len(sorted_traffic_load)):
if i <= max_decrease_index:
if not self.street_network.change_maxspeed(sorted_traffic_load[i][0], -20):
max_decrease_index += 1
j = len(sorted_traffic_load) - i - 1
if j >= min_increase_index:
if not self.street_network.change_maxspeed(sorted_traffic_load[j][0], 20):
min_increase_index -= 1
if max_decrease_index >= min_increase_index:
break
self.cumulative_traffic_load = None
def calculate_driving_speed_var(street_length, max_speed, number_of_trips):
# individual formulae:
# number of trips per time = (number of trips * street length) / (actual speed * traffic period duration)
# available space for each car = street length / max(number of trips per time, 1)
# available braking distance = max(available space for each car - car length, min breaking distance)
# how fast can a car drive to ensure the calculated breaking distance?
# potential speed = sqrt(braking deceleration * available braking distance * 2)
# actual speed = min(max speed, potential speed)
# all in one calculation:
intermediate_quotient_result = settings["traffic_period_duration"] * 3600 * settings["braking_deceleration"] / max(number_of_trips, 1)
potential_speed = sqrt(intermediate_quotient_result**2 + 2 * settings["car_length"] * settings["braking_deceleration"]) + intermediate_quotient_result # m/s
actual_speed = min(max_speed, potential_speed * 3.6) # km/h
return actual_speed
def calculate_driving_speed(street_length, max_speed, number_of_trips):
# distribute trips over the street
available_space_for_each_car = street_length / max(number_of_trips, 1) # m
available_braking_distance = max(available_space_for_each_car - settings["car_length"], settings["min_breaking_distance"]) # m
# how fast can a car drive to ensure the calculated breaking distance?
potential_speed = sqrt(settings["braking_deceleration"] * available_braking_distance * 2) # m/s
# cars respect speed limit
actual_speed = min(max_speed, potential_speed * 3.6) # km/h
return actual_speed
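# Worked example for calculate_driving_speed (illustrative; the settings values
# below are assumptions, not taken from settings.py): with car_length = 5 m,
# min_breaking_distance = 10 m and braking_deceleration = 7.5 m/s^2, a 1000 m
# street at 100 km/h carrying 50 trips leaves 20 m per car, a braking distance
# of max(20 - 5, 10) = 15 m, a potential speed of sqrt(7.5 * 15 * 2) = 15 m/s
# (54 km/h), and therefore an actual speed of min(100, 54) = 54 km/h.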
if __name__ == "__main__":
def out(*output):
for o in output:
print o,
print ''
street_network = StreetNetwork()
street_network.add_node(1, 0, 0)
street_network.add_node(2, 0, 0)
street_network.add_node(3, 0, 0)
street_network.add_street((1, 2,), 10, 50)
street_network.add_street((2, 3,), 100, 140)
trips = dict()
trips[1] = [3]
    sim = Simulation(street_network, trips, 0.0, out)  # jam_tolerance 0.0: congestion fully perceived
for step in range(10):
print "Running simulation step", step + 1, "of 10..."
sim.step()
# done
| gpl-3.0 | -1,157,296,579,521,337,600 | 41.006173 | 160 | 0.64482 | false |
acsone/knowledge | document_page/__openerp__.py | 1 | 1749 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Page',
'version': '8.0.1.0.2',
'category': 'Knowledge Management',
'author': 'OpenERP SA,Odoo Community Association (OCA)',
'website': 'http://www.openerp.com/',
'license': 'AGPL-3',
'depends': [
'knowledge',
'mail',
],
'data': [
'wizard/document_page_create_menu_view.xml',
'wizard/document_page_show_diff_view.xml',
'document_page_view.xml',
'security/document_page_security.xml',
'security/ir.model.access.csv',
],
'demo': [
'document_page_demo.xml'
],
'test': [
'test/document_page_test00.yml'
],
'installable': True,
'auto_install': False,
'images': [],
'css': ['static/src/css/document_page.css'],
}
| agpl-3.0 | -1,240,762,950,097,291,000 | 33.98 | 78 | 0.573471 | false |
bookbrainz/bookbrainz-schema | utils/v1_migration.py | 1 | 32081 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Copyright (C) 2015 Ben Ockmore
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" This module contains the functions necessary for migrating from v0.x of the
BookBrainz database to v1.x. There is no function to undo the operation;
it is therefore advised that the database be backed up prior to running this
script.
"""
from __future__ import (absolute_import, division, print_function)
import click
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from bbschema import *
def migrate_types(session):
session.execute("""
INSERT INTO _bookbrainz.editor_type (
id, label
) SELECT
user_type_id, label
FROM bookbrainz.user_type
""")
session.execute("""
INSERT INTO _bookbrainz.publisher_type (
id, label
) SELECT
publisher_type_id, label
FROM bookbrainz.publisher_type
""")
session.execute("""
INSERT INTO _bookbrainz.creator_type (
id, label
) SELECT
creator_type_id, label
FROM bookbrainz.creator_type
""")
session.execute("""
INSERT INTO _bookbrainz.work_type (
id, label
) SELECT
work_type_id, label
FROM bookbrainz.work_type
""")
session.execute("""
INSERT INTO _bookbrainz.publication_type (
id, label
) SELECT
publication_type_id, label
FROM bookbrainz.publication_type
""")
session.execute("""
INSERT INTO _bookbrainz.edition_status (
id, label
) SELECT
edition_status_id, label
FROM bookbrainz.edition_status
""")
session.execute("""
INSERT INTO _bookbrainz.edition_format (
id, label
) SELECT
edition_format_id, label
FROM bookbrainz.edition_format
""")
session.execute("""
INSERT INTO _bookbrainz.identifier_type (
id, label, description, detection_regex, validation_regex,
display_template, entity_type, parent_id, child_order, deprecated
) SELECT
identifier_type_id, label, description, detection_regex,
validation_regex, 'Placeholder Template', 'Creator', parent_id,
child_order, false
FROM bookbrainz.identifier_type
""")
session.execute("""
INSERT INTO _bookbrainz.relationship_type (
id, label, description, display_template, source_entity_type,
target_entity_type, parent_id, child_order, deprecated
) SELECT
relationship_type_id, label, description, template, 'Creator',
'Creator', parent_id, child_order, deprecated
FROM bookbrainz.rel_type
""")
def migrate_editors(session):
session.execute("""
INSERT INTO _bookbrainz.editor (
id, name, email, reputation, bio, birth_date, created_at,
active_at, type_id, gender_id, area_id, password,
revisions_applied, revisions_reverted, total_revisions
) SELECT
user_id, name, email, reputation, COALESCE(bio, ''::text),
birth_date, created_at, active_at, user_type_id, gender_id, NULL,
password, revisions_applied, revisions_reverted, total_revisions
FROM bookbrainz.user
""")
session.execute("""
INSERT INTO _bookbrainz.editor__language (
editor_id, language_id, proficiency
) SELECT
user_id, language_id,
proficiency::text::_bookbrainz.lang_proficiency
FROM bookbrainz.user_language
""")
def migrate_revisions(session):
session.execute("""
INSERT INTO _bookbrainz.revision (
id, author_id, created_at
) SELECT
r.revision_id, r.user_id, r.created_at
FROM bookbrainz.revision r;
""")
session.execute("""
INSERT INTO _bookbrainz.note (
id, author_id, revision_id, content, posted_at
) SELECT
revision_note_id, user_id, revision_id, content, posted_at
FROM bookbrainz.revision_note;
""")
def limit_query(q, limit):
offset = 0
results = q.limit(limit).offset(offset).all()
while results:
for result in results:
yield result
offset += limit
results = q.limit(limit).offset(offset).all()
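# Usage sketch (illustrative): page through a large query in chunks of 100
# rows, as done for entities further below.
#
#   for entity in limit_query(session.query(Entity), 100):
#       ...  # handle each entity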
def convert_date(date, precision):
if date is None:
return (None, None, None)
else:
if precision == 'DAY':
return (date.year, date.month, date.day)
elif precision == 'MONTH':
return (date.year, date.month, None)
else:
return (date.year, None, None)
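# Example (illustrative): convert_date(date(1953, 4, 12), 'MONTH') returns
# (1953, 4, None); any precision other than 'DAY' or 'MONTH' collapses to
# (1953, None, None), and a missing date yields (None, None, None).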
def insert_creator_data_and_revision(session, entity, revision, data,
alias_set_id, identifier_set_id,
relationship_set_id):
if data is not None:
begin_year, begin_month, begin_day = \
convert_date(data.begin_date, data.begin_date_precision)
end_year, end_month, end_day = \
convert_date(data.end_date, data.end_date_precision)
result = session.execute(
'''INSERT INTO _bookbrainz.creator_data (
alias_set_id, identifier_set_id, relationship_set_id,
annotation_id, disambiguation_id, begin_year, begin_month,
begin_day, end_year, end_month, end_day, ended, gender_id, type_id
) VALUES (
:alias_set_id, :identifier_set_id, :relationship_set_id,
:annotation_id, :disambiguation_id, :begin_year, :begin_month,
:begin_day, :end_year, :end_month, :end_day, :ended, :gender_id,
:type_id
) RETURNING id''', {
'alias_set_id': alias_set_id,
'identifier_set_id': identifier_set_id,
'relationship_set_id': relationship_set_id,
'annotation_id': data.annotation_id,
'disambiguation_id': data.disambiguation_id,
'begin_year': begin_year,
'begin_month': begin_month,
'begin_day': begin_day,
'end_year': end_year,
'end_month': end_month,
'end_day': end_day,
'ended': data.ended,
'gender_id': data.gender_id,
'type_id': data.creator_type_id
}
)
data_id = result.fetchone()[0]
else:
data_id = None
session.execute(
'''INSERT INTO _bookbrainz.creator_revision (
id, bbid, data_id
) VALUES (
:id, :bbid, :data_id
)''', {
'id': revision.revision_id,
'bbid': entity.entity_gid,
'data_id': data_id
}
)
def insert_edition_data_and_revision(session, entity, revision, data,
alias_set_id, identifier_set_id,
relationship_set_id):
if data is not None:
language_set_id = None
publisher_set_id = None
release_event_set_id = None
if data.language_id is not None:
language_set_id = session.execute(
'''INSERT INTO _bookbrainz.language_set
DEFAULT VALUES
RETURNING id'''
).fetchone()[0]
session.execute(
'''INSERT INTO _bookbrainz.language_set__language (
set_id, language_id
) VALUES (
:set_id, :language_id
)
''', {
'set_id': language_set_id,
'language_id': data.language_id
}
)
# Create a release event if release date is not NULL
if data.release_date is not None:
release_year, release_month, release_day = \
convert_date(data.release_date, data.release_date_precision)
result = session.execute(
'''INSERT INTO _bookbrainz.release_event (
"year", "month", "day"
) VALUES (
:year, :month, :day
) RETURNING id
''', {
'year': release_year,
'month': release_month,
'day': release_day
}
)
release_event_id = result.fetchone()[0]
release_event_set_id = session.execute(
'''INSERT INTO _bookbrainz.release_event_set
DEFAULT VALUES
RETURNING id'''
).fetchone()[0]
session.execute(
'''INSERT INTO _bookbrainz.release_event_set__release_event (
release_event_id, set_id
) VALUES (
:event_id, :set_id
)
''', {
'event_id': release_event_id,
'set_id': release_event_set_id
}
)
if data.publisher_gid is not None:
publisher_set_id = session.execute(
'''INSERT INTO _bookbrainz.publisher_set
DEFAULT VALUES
RETURNING id'''
).fetchone()[0]
session.execute(
'''INSERT INTO _bookbrainz.publisher_set__publisher (
set_id, publisher_bbid
) VALUES (
:set_id, :publisher_bbid
)
''', {
'set_id': publisher_set_id,
'publisher_bbid': data.publisher_gid
}
)
result = session.execute(
'''INSERT INTO _bookbrainz.edition_data (
alias_set_id, identifier_set_id, relationship_set_id,
annotation_id, disambiguation_id, publication_bbid, width, height,
depth, weight, pages, format_id, status_id, language_set_id,
release_event_set_id, publisher_set_id
) VALUES (
:alias_set_id, :identifier_set_id, :relationship_set_id,
:annotation_id, :disambiguation_id, :publication_bbid, :width,
:height, :depth, :weight, :pages, :format_id, :status_id,
:language_set_id, :release_event_set_id, :publisher_set_id
) RETURNING id''', {
'alias_set_id': alias_set_id,
'identifier_set_id': identifier_set_id,
'relationship_set_id': relationship_set_id,
'annotation_id': data.annotation_id,
'disambiguation_id': data.disambiguation_id,
'publication_bbid': data.publication_gid,
'width': data.width,
'height': data.height,
'depth': data.depth,
'weight': data.weight,
'pages': data.pages,
'format_id': data.edition_format_id,
'status_id': data.edition_status_id,
'language_set_id': language_set_id,
'release_event_set_id': release_event_set_id,
'publisher_set_id': publisher_set_id
}
)
data_id = result.fetchone()[0]
else:
data_id = None
session.execute(
'''INSERT INTO _bookbrainz.edition_revision (
id, bbid, data_id
) VALUES (
:id, :bbid, :data_id
)''', {
'id': revision.revision_id,
'bbid': entity.entity_gid,
'data_id': data_id
}
)
def insert_work_data_and_revision(session, entity, revision, data,
alias_set_id, identifier_set_id,
relationship_set_id):
if data is not None:
language_set_id = None
if data.languages:
language_set_id = session.execute(
'''INSERT INTO _bookbrainz.language_set
DEFAULT VALUES
RETURNING id'''
).fetchone()[0]
# Create a release event if release date is not NULL
for language in data.languages:
session.execute(
'''INSERT INTO _bookbrainz.language_set__language (
set_id, language_id
) VALUES (
:set_id, :language_id
)
''', {
'set_id': language_set_id,
'language_id': language.id
}
)
result = session.execute(
'''INSERT INTO _bookbrainz.work_data (
alias_set_id, identifier_set_id, relationship_set_id,
annotation_id, disambiguation_id, type_id, language_set_id
) VALUES (
:alias_set_id, :identifier_set_id, :relationship_set_id,
:annotation_id, :disambiguation_id, :type_id, :language_set_id
) RETURNING id''', {
'alias_set_id': alias_set_id,
'identifier_set_id': identifier_set_id,
'relationship_set_id': relationship_set_id,
'annotation_id': data.annotation_id,
'disambiguation_id': data.disambiguation_id,
'type_id': data.work_type_id,
'language_set_id': language_set_id
}
)
data_id = result.fetchone()[0]
else:
data_id = None
session.execute(
'''INSERT INTO _bookbrainz.work_revision (
id, bbid, data_id
) VALUES (
:id, :bbid, :data_id
)''', {
'id': revision.revision_id,
'bbid': entity.entity_gid,
'data_id': data_id
}
)
def insert_publisher_data_and_revision(session, entity, revision, data,
alias_set_id, identifier_set_id,
relationship_set_id):
if data is not None:
begin_year, begin_month, begin_day = \
convert_date(data.begin_date, data.begin_date_precision)
end_year, end_month, end_day = \
convert_date(data.end_date, data.end_date_precision)
result = session.execute(
'''INSERT INTO _bookbrainz.publisher_data (
alias_set_id, identifier_set_id, relationship_set_id,
annotation_id, disambiguation_id, begin_year, begin_month,
begin_day, end_year, end_month, end_day, ended, type_id
) VALUES (
:alias_set_id, :identifier_set_id, :relationship_set_id,
:annotation_id, :disambiguation_id, :begin_year, :begin_month,
:begin_day, :end_year, :end_month, :end_day, :ended, :type_id
) RETURNING id''', {
'alias_set_id': alias_set_id,
'identifier_set_id': identifier_set_id,
'relationship_set_id': relationship_set_id,
'annotation_id': data.annotation_id,
'disambiguation_id': data.disambiguation_id,
'begin_year': begin_year,
'begin_month': begin_month,
'begin_day': begin_day,
'end_year': end_year,
'end_month': end_month,
'end_day': end_day,
'ended': data.ended,
'type_id': data.publisher_type_id
}
)
data_id = result.fetchone()[0]
else:
data_id = None
session.execute(
'''INSERT INTO _bookbrainz.publisher_revision (
id, bbid, data_id
) VALUES (
:id, :bbid, :data_id
)''', {
'id': revision.revision_id,
'bbid': entity.entity_gid,
'data_id': data_id
}
)
def insert_publication_data_and_revision(session, entity, revision, data,
alias_set_id, identifier_set_id,
relationship_set_id):
if data is not None:
result = session.execute(
'''INSERT INTO _bookbrainz.publication_data (
alias_set_id, identifier_set_id, relationship_set_id,
annotation_id, disambiguation_id, type_id
) VALUES (
:alias_set_id, :identifier_set_id, :relationship_set_id,
:annotation_id, :disambiguation_id, :type_id
) RETURNING id''', {
'alias_set_id': alias_set_id,
'identifier_set_id': identifier_set_id,
'relationship_set_id': relationship_set_id,
'annotation_id': data.annotation_id,
'disambiguation_id': data.disambiguation_id,
'type_id': data.publication_type_id
}
)
data_id = result.fetchone()[0]
else:
data_id = None
session.execute(
'''INSERT INTO _bookbrainz.publication_revision (
id, bbid, data_id
) VALUES (
:id, :bbid, :data_id
)''', {
'id': revision.revision_id,
'bbid': entity.entity_gid,
'data_id': data_id
}
)
def migrate_entities(session):
session.execute("""
INSERT INTO _bookbrainz.entity (
bbid, type
) SELECT
entity_gid, _type::text::_bookbrainz.entity_type
FROM bookbrainz.entity
""")
# Leave master_revision_id NULL for now - they don't exist yet
session.execute("""
INSERT INTO _bookbrainz.creator_header (
bbid
) SELECT
entity_gid
FROM bookbrainz.entity
WHERE _type = 'Creator'
""")
session.execute("""
INSERT INTO _bookbrainz.edition_header (
bbid
) SELECT
entity_gid
FROM bookbrainz.entity
WHERE _type = 'Edition'
""")
session.execute("""
INSERT INTO _bookbrainz.publication_header (
bbid
) SELECT
entity_gid
FROM bookbrainz.entity
WHERE _type = 'Publication'
""")
session.execute("""
INSERT INTO _bookbrainz.publisher_header (
bbid
) SELECT
entity_gid
FROM bookbrainz.entity
WHERE _type = 'Publisher'
""")
session.execute("""
INSERT INTO _bookbrainz.work_header (
bbid
) SELECT
entity_gid
FROM bookbrainz.entity
WHERE _type = 'Work'
""")
def set_master_revision(session, bbid, entity_type, revision_id):
table = entity_type.lower()
session.execute("""
UPDATE _bookbrainz.{}_header SET master_revision_id=:revision_id
WHERE bbid=:bbid
""".format(table),
{'revision_id': revision_id, 'bbid': bbid})
def migrate_entity_data(session):
session.execute("""
INSERT INTO _bookbrainz.annotation (
id, content, last_revision_id
) SELECT DISTINCT ON(a.annotation_id)
a.annotation_id, a.content, r.revision_id
FROM bookbrainz.entity_revision er
LEFT JOIN bookbrainz.revision r
ON r.revision_id = er.revision_id
LEFT JOIN bookbrainz.entity_data ed
ON er.entity_data_id = ed.entity_data_id
LEFT JOIN bookbrainz.annotation a
ON ed.annotation_id = a.annotation_id
WHERE a.annotation_id IS NOT NULL
ORDER BY a.annotation_id, revision_id
""")
session.execute("""
INSERT INTO _bookbrainz.disambiguation (
id, comment
) SELECT
disambiguation_id, COALESCE(comment, ''::text)
FROM bookbrainz.disambiguation
""")
session.execute("""
INSERT INTO _bookbrainz.alias (
id, name, sort_name, language_id, "primary"
) SELECT
alias_id, name, sort_name, language_id, "primary"
FROM bookbrainz.alias
""")
session.execute("""
INSERT INTO _bookbrainz.identifier (
id, type_id, value
) SELECT
identifier_id, identifier_type_id, value
FROM bookbrainz.identifier
""")
# For each entity, go through all the entity and relationship revisions
# Keep track of the aliases, identifiers and relationships on the
# entity at all times
# For each entity revision, create a new alias set, identifier_set and
# relationship set, using the tracked aliases, identifier and
# relationships
EMPTY_RELATIONSHIP_SET = session.execute(
'INSERT INTO _bookbrainz.relationship_set DEFAULT VALUES RETURNING id'
).fetchone()[0]
EMPTY_ALIAS_SET = session.execute(
'INSERT INTO _bookbrainz.alias_set DEFAULT VALUES RETURNING id'
).fetchone()[0]
entity_query = session.query(Entity)
processed_revisions = []
for entity in limit_query(entity_query, 100):
print(entity)
print('--------------')
entity_revision_query = session.query(EntityRevision).\
filter(EntityRevision.entity_gid == entity.entity_gid)
relationship_revision_query = session.query(RelationshipRevision).\
join(RelationshipData).\
join(RelationshipEntity).\
filter(RelationshipEntity.entity_gid == entity.entity_gid)
all_revisions = (
entity_revision_query.all() + relationship_revision_query.all()
)
all_revisions = sorted(all_revisions, key=lambda x: x.created_at)
relationship_set_id = EMPTY_RELATIONSHIP_SET
aliases = []
alias_set_id = EMPTY_ALIAS_SET
identifiers = []
identifier_set_id = None
data = None
previous_revision = None
for revision in all_revisions:
print('r{}'.format(revision.revision_id))
if isinstance(revision, EntityRevision):
if revision.entity_data is not None:
aliases = revision.entity_data.aliases
identifiers = revision.entity_data.identifiers
default_alias = revision.entity_data.default_alias
# Create alias set
if aliases:
result = session.execute('''
INSERT INTO _bookbrainz.alias_set (
default_alias_id
) VALUES (
:default_id
) RETURNING id
''', {
"default_id":
default_alias.alias_id
if default_alias is not None else None
})
alias_set_id = result.fetchone()[0]
for alias in aliases:
session.execute('''
INSERT INTO _bookbrainz.alias_set__alias (
set_id, alias_id
) VALUES (
:set_id, :alias_id
)
''', {
"set_id": alias_set_id,
"alias_id": alias.alias_id
})
else:
alias_set_id = EMPTY_ALIAS_SET
# Create identifier set
if identifiers:
EMPTY_IDENTIFIER_SET = session.execute(
'INSERT INTO _bookbrainz.identifier_set DEFAULT VALUES RETURNING id'
).fetchone()[0]
result = session.execute('''INSERT INTO _bookbrainz.identifier_set
DEFAULT VALUES RETURNING id
''')
identifier_set_id = result.fetchone()[0]
# Add identifiers to set
for identifier in identifiers:
session.execute('''
INSERT INTO
_bookbrainz.identifier_set__identifier (
set_id, identifier_id
) VALUES (
:set_id, :identifier_id
)
''', {
"set_id": identifier_set_id,
"identifier_id": identifier.identifier_id
})
data = revision.entity_data
else:
# All relationship revisions add a relationship
rel_data = revision.relationship_data
# Create new relationship set
source = [e for e in rel_data.entities if e.position == 0][0]
target = [e for e in rel_data.entities if e.position == 1][0]
# Find or create relationship
result = session.execute(
'SELECT id FROM _bookbrainz.relationship WHERE id = :id',
{'id': rel_data.relationship_data_id}
).fetchone()
if result is None:
session.execute('''INSERT INTO _bookbrainz.relationship (
id, type_id, source_bbid, target_bbid
) VALUES (
:id, :type_id, :source_bbid, :target_bbid
)
''', {
"id": rel_data.relationship_data_id,
"type_id": rel_data.relationship_type_id,
"source_bbid": source.entity_gid,
"target_bbid": target.entity_gid
})
# Create relationship_set
result = session.execute('''
INSERT INTO _bookbrainz.relationship_set
DEFAULT VALUES RETURNING id
''')
new_relationship_set_id = result.fetchone()[0]
# Link previous relationships to new relationship set
if relationship_set_id is not None:
session.execute('''
INSERT INTO
_bookbrainz.relationship_set__relationship (
set_id, relationship_id
) SELECT
:new_set_id, relationship_id
FROM _bookbrainz.relationship_set__relationship
WHERE set_id = :set_id
''', {
'set_id': relationship_set_id,
'new_set_id': new_relationship_set_id
})
# Add new relationship to the set
session.execute('''
INSERT INTO _bookbrainz.relationship_set__relationship (
set_id, relationship_id
) VALUES (
:set_id, :relationship_id
)
''', {
'set_id': new_relationship_set_id,
'relationship_id': rel_data.relationship_data_id
})
# Set current relationship_set_id to the new one
relationship_set_id = new_relationship_set_id
# Create entity data
if isinstance(entity, Creator):
insert_creator_data_and_revision(
session, entity, revision, data, alias_set_id,
identifier_set_id, relationship_set_id
)
set_master_revision(session, entity.entity_gid, 'Creator',
revision.revision_id)
elif isinstance(entity, Edition):
insert_edition_data_and_revision(
session, entity, revision, data, alias_set_id,
identifier_set_id, relationship_set_id
)
set_master_revision(session, entity.entity_gid, 'Edition',
revision.revision_id)
elif isinstance(entity, Work):
insert_work_data_and_revision(
session, entity, revision, data, alias_set_id,
identifier_set_id, relationship_set_id
)
set_master_revision(session, entity.entity_gid, 'Work',
revision.revision_id)
elif isinstance(entity, Publisher):
insert_publisher_data_and_revision(
session, entity, revision, data, alias_set_id,
identifier_set_id, relationship_set_id
)
set_master_revision(session, entity.entity_gid, 'Publisher',
revision.revision_id)
elif isinstance(entity, Publication):
insert_publication_data_and_revision(
session, entity, revision, data, alias_set_id,
identifier_set_id, relationship_set_id
)
set_master_revision(session, entity.entity_gid, 'Publication',
revision.revision_id)
else:
raise Exception('Err... what?')
pair = None
if previous_revision is not None:
pair = (previous_revision.revision_id, revision.revision_id)
if pair not in processed_revisions:
session.execute('''
INSERT INTO _bookbrainz.revision_parent (
parent_id, child_id
) VALUES (
:parent_id, :revision_id
)
''', {
'parent_id': previous_revision.revision_id,
'revision_id': revision.revision_id
})
processed_revisions.append(pair)
previous_revision = revision
@click.command()
@click.argument('username')
@click.argument('database')
@click.option('--password', prompt=True, hide_input=True,
help=('the password for the specified PostgreSQL user, prompted'
' for if not provided in the command line'))
@click.option('--host', default='localhost',
help='the hostname for the instance of PostgreSQL to connect to')
@click.option('--port', default=5432,
help='the port for the instance of PostgreSQL to connect to')
def migrate(username, database, password, **kwargs):
""" Migrates the specified database from v0.x to v1.x, using the provided
credentials and connections information.
"""
connection_string =\
'postgresql://{}:{}@{}:{}/{}'.format(
username, password, kwargs['host'], kwargs['port'], database
)
engine = create_engine(connection_string, echo=True)
Session = sessionmaker(bind=engine)
session = Session()
# Assume that the new schema exists in _bookbrainz
migrate_types(session)
migrate_editors(session)
migrate_revisions(session)
migrate_entities(session)
migrate_entity_data(session)
session.commit()
if __name__ == "__main__":
migrate()
| gpl-2.0 | 2,325,289,391,266,262,000 | 35.832377 | 96 | 0.509616 | false |
chouseknecht/ansible | lib/ansible/plugins/doc_fragments/docker.py | 9 | 7278 | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
type: str
default: localhost
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
ca_cert:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_cert, cert_path ]
client_key:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_key, key_path ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server. Note that if C(tls_verify) is set to C(yes) as well, it will take precedence.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: no
validate_certs:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: no
aliases: [ tls_verify ]
debug:
description:
- Debug mode
type: bool
default: no
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docker-py.readthedocs.io/en/stable/machine/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
and use C($DOCKER_CONFIG/config.json) otherwise.
'''
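    # Illustrative shell environment matching the variables documented above
    # (the values are examples only, not defaults):
    #
    #   export DOCKER_HOST=tcp://192.0.2.23:2376
    #   export DOCKER_TLS_VERIFY=1
    #   export DOCKER_CERT_PATH=$HOME/.docker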
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
install the C(docker) Python module. Note that both modules should I(not)
be installed at the same time. Also note that when both modules are installed
and one of them is uninstalled, the other might no longer function and a
reinstall of it is required."
'''
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
- "Python >= 2.7"
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
This module does I(not) work with docker-py."
'''
| gpl-3.0 | 748,627,635,697,639,000 | 52.514706 | 125 | 0.672987 | false |
eonpatapon/neutron | neutron/tests/unit/plugins/ml2/test_agent_scheduler.py | 9 | 1384 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import test_agent_scheduler
from neutron.tests.unit.plugins.ml2 import test_plugin
class Ml2AgentSchedulerTestCase(
test_agent_scheduler.OvsAgentSchedulerTestCase):
plugin_str = test_plugin.PLUGIN_NAME
l3_plugin = ('neutron.services.l3_router.'
'l3_router_plugin.L3RouterPlugin')
class Ml2L3AgentNotifierTestCase(
test_agent_scheduler.OvsL3AgentNotifierTestCase):
plugin_str = test_plugin.PLUGIN_NAME
l3_plugin = ('neutron.services.l3_router.'
'l3_router_plugin.L3RouterPlugin')
class Ml2DhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
plugin_str = test_plugin.PLUGIN_NAME
| apache-2.0 | 2,303,743,417,170,818,600 | 36.405405 | 78 | 0.737717 | false |
EDUlib/edx-platform | cms/djangoapps/contentstore/views/tests/test_access.py | 4 | 1746 | """
Tests access.py
"""
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from common.djangoapps.student.auth import add_users
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import AdminFactory
from ..access import get_user_role
class RolesTest(TestCase):
"""
Tests for lti user role serialization.
"""
def setUp(self):
""" Test case setup """
super().setUp()
self.global_admin = AdminFactory()
self.instructor = User.objects.create_user('testinstructor', '[email protected]', 'foo')
self.staff = User.objects.create_user('teststaff', '[email protected]', 'foo')
self.course_key = CourseLocator('mitX', '101', 'test')
def test_get_user_role_instructor(self):
"""
Verifies if user is instructor.
"""
add_users(self.global_admin, CourseInstructorRole(self.course_key), self.instructor)
self.assertEqual(
'instructor',
get_user_role(self.instructor, self.course_key)
)
add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)
self.assertEqual(
'instructor',
get_user_role(self.instructor, self.course_key)
)
def test_get_user_role_staff(self):
"""
Verifies if user is staff.
"""
add_users(self.global_admin, CourseStaffRole(self.course_key), self.staff)
self.assertEqual(
'staff',
get_user_role(self.staff, self.course_key)
)
| agpl-3.0 | 7,839,506,202,560,799,000 | 31.943396 | 109 | 0.65063 | false |
inetCatapult/troposphere | troposphere/elasticbeanstalk.py | 18 | 2738 | # Copyright (c) 2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
WebServer = "WebServer"
Worker = "Worker"
WebServerType = "Standard"
WorkerType = "SQS/HTTP"
class SourceBundle(AWSProperty):
props = {
'S3Bucket': (basestring, True),
'S3Key': (basestring, True),
}
class SourceConfiguration(AWSProperty):
props = {
'ApplicationName': (basestring, True),
'TemplateName': (basestring, True),
}
class OptionSettings(AWSProperty):
props = {
'Namespace': (basestring, True),
'OptionName': (basestring, True),
'Value': (basestring, True),
}
class Application(AWSObject):
resource_type = "AWS::ElasticBeanstalk::Application"
props = {
'ApplicationName': (basestring, False),
'Description': (basestring, False),
}
class ApplicationVersion(AWSObject):
resource_type = "AWS::ElasticBeanstalk::ApplicationVersion"
props = {
'ApplicationName': (basestring, True),
'Description': (basestring, False),
'SourceBundle': (SourceBundle, False),
}
class ConfigurationTemplate(AWSObject):
resource_type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
props = {
'ApplicationName': (basestring, True),
'Description': (basestring, False),
'EnvironmentId': (basestring, False),
'OptionSettings': ([OptionSettings], False),
'SolutionStackName': (basestring, False),
'SourceConfiguration': (SourceConfiguration, False),
}
def validate_tier_name(name):
valid_names = [WebServer, Worker]
if name not in valid_names:
raise ValueError('Tier name needs to be one of %r' % valid_names)
return name
def validate_tier_type(tier_type):
valid_types = [WebServerType, WorkerType]
if tier_type not in valid_types:
raise ValueError('Tier type needs to be one of %r' % valid_types)
return tier_type
class Tier(AWSProperty):
props = {
'Name': (validate_tier_name, False),
'Type': (validate_tier_type, False),
'Version': (basestring, False),
}
class Environment(AWSObject):
resource_type = "AWS::ElasticBeanstalk::Environment"
props = {
'ApplicationName': (basestring, True),
'CNAMEPrefix': (basestring, False),
'Description': (basestring, False),
'EnvironmentName': (basestring, False),
'OptionSettings': ([OptionSettings], False),
'SolutionStackName': (basestring, False),
'Tags': (Tags, False),
'TemplateName': (basestring, False),
'Tier': (Tier, False),
'VersionLabel': (basestring, False),
}
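# Illustrative usage sketch (not part of the original module; resource names and
# the solution stack string are placeholders): the classes above are meant to be
# added to a troposphere Template.
#
#     from troposphere import Ref, Template
#     t = Template()
#     app = t.add_resource(Application("SampleApp", Description="demo application"))
#     t.add_resource(Environment(
#         "SampleEnvironment",
#         ApplicationName=Ref(app),
#         SolutionStackName="64bit Amazon Linux running Python",
#     ))
#     print(t.to_json())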
| bsd-2-clause | 2,374,930,236,219,634,000 | 25.07619 | 73 | 0.631848 | false |
GraemeFulton/job-search | docutils-0.12/tools/dev/unicode2rstsubs.py | 18 | 6726 | #! /usr/bin/env python
# $Id: unicode2rstsubs.py 7442 2012-06-13 23:27:03Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This program has been placed in the public domain.
"""
unicode2rstsubs.py -- produce character entity files (reStructuredText
substitutions) from the W3C master unicode.xml file.
This program extracts character entity and entity set information from a
unicode.xml file and produces multiple reStructuredText files (in the current
directory) containing substitutions. Entity sets are from ISO 8879 & ISO
9573-13 (combined), MathML, and HTML4. One or two files are produced for each
entity set; a second file with a "-wide.txt" suffix is produced if there are
wide-Unicode characters in the set.
The input file, unicode.xml, is maintained as part of the MathML 2
Recommendation XML source, and is available from
<http://www.w3.org/2003/entities/xml/>.
"""
import sys
import os
import optparse
import re
from xml.parsers.expat import ParserCreate
usage_msg = """Usage: %s [unicode.xml]\n"""
def usage(prog, status=0, msg=None):
sys.stderr.write(usage_msg % prog)
if msg:
sys.stderr.write(msg + '\n')
sys.exit(status)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 2:
inpath = argv[1]
elif len(argv) > 2:
usage(argv[0], 2,
'Too many arguments (%s): only 1 expected.' % (len(argv) - 1))
else:
inpath = 'unicode.xml'
if not os.path.isfile(inpath):
usage(argv[0], 1, 'No such file: "%s".' % inpath)
if sys.version_info >= (3,0):
infile = open(inpath, mode='rb')
else:
infile = open(inpath)
process(infile)
def process(infile):
grouper = CharacterEntitySetExtractor(infile)
grouper.group()
grouper.write_sets()
class CharacterEntitySetExtractor:
"""
Extracts character entity information from unicode.xml file, groups it by
entity set, and writes out reStructuredText substitution files.
"""
unwanted_entity_sets = ['stix', # unknown, buggy set
'predefined']
header = """\
.. This data file has been placed in the public domain.
.. Derived from the Unicode character mappings available from
<http://www.w3.org/2003/entities/xml/>.
Processed by unicode2rstsubs.py, part of Docutils:
<http://docutils.sourceforge.net>.
"""
def __init__(self, infile):
self.infile = infile
"""Input unicode.xml file."""
self.parser = self.setup_parser()
"""XML parser."""
self.elements = []
"""Stack of element names. Last is current element."""
self.sets = {}
"""Mapping of charent set name to set dict."""
self.charid = None
"""Current character's "id" attribute value."""
self.descriptions = {}
"""Mapping of character ID to description."""
def setup_parser(self):
parser = ParserCreate()
parser.StartElementHandler = self.StartElementHandler
parser.EndElementHandler = self.EndElementHandler
parser.CharacterDataHandler = self.CharacterDataHandler
return parser
def group(self):
self.parser.ParseFile(self.infile)
def StartElementHandler(self, name, attributes):
self.elements.append(name)
handler = name + '_start'
if hasattr(self, handler):
getattr(self, handler)(name, attributes)
def EndElementHandler(self, name):
assert self.elements[-1] == name, \
               'unknown end-tag %r (%r)' % (name, self.elements[-1])
self.elements.pop()
handler = name + '_end'
if hasattr(self, handler):
getattr(self, handler)(name)
def CharacterDataHandler(self, data):
handler = self.elements[-1] + '_data'
if hasattr(self, handler):
getattr(self, handler)(data)
def character_start(self, name, attributes):
self.charid = attributes['id']
def entity_start(self, name, attributes):
set = self.entity_set_name(attributes['set'])
if not set:
return
if set not in self.sets:
print('bad set: %r' % set)
return
entity = attributes['id']
assert (entity not in self.sets[set]
or self.sets[set][entity] == self.charid), \
('sets[%r][%r] == %r (!= %r)'
% (set, entity, self.sets[set][entity], self.charid))
self.sets[set][entity] = self.charid
def description_data(self, data):
self.descriptions.setdefault(self.charid, '')
self.descriptions[self.charid] += data
entity_set_name_pat = re.compile(r'[0-9-]*(.+)$')
"""Pattern to strip ISO numbers off the beginning of set names."""
def entity_set_name(self, name):
"""
Return lowcased and standard-number-free entity set name.
Return ``None`` for unwanted entity sets.
"""
match = self.entity_set_name_pat.match(name)
name = match.group(1).lower()
if name in self.unwanted_entity_sets:
return None
self.sets.setdefault(name, {})
return name
def write_sets(self):
sets = list(self.sets.keys())
sets.sort()
for set_name in sets:
self.write_set(set_name)
def write_set(self, set_name, wide=None):
if wide:
outname = set_name + '-wide.txt'
else:
outname = set_name + '.txt'
outfile = open(outname, 'w')
print('writing file "%s"' % outname)
outfile.write(self.header + '\n')
set = self.sets[set_name]
entities = [(e.lower(), e) for e in set.keys()]
entities.sort()
longest = 0
for _, entity_name in entities:
longest = max(longest, len(entity_name))
has_wide = None
for _, entity_name in entities:
has_wide = self.write_entity(
set, set_name, entity_name, outfile, longest, wide) or has_wide
if has_wide and not wide:
self.write_set(set_name, 1)
def write_entity(self, set, set_name, entity_name, outfile, longest,
wide=None):
charid = set[entity_name]
if not wide:
for code in charid[1:].split('-'):
if int(code, 16) > 0xFFFF:
return 1 # wide-Unicode character
codes = ' '.join(['U+%s' % code for code in charid[1:].split('-')])
outfile.write('.. %-*s unicode:: %s .. %s\n'
% (longest + 2, '|' + entity_name + '|',
codes, self.descriptions[charid]))
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 3,207,403,906,967,389,000 | 31.809756 | 79 | 0.592923 | false |
h2oai/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_all_any_anyfactor_isfactor.py | 2 | 1152 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.utils.typechecks import assert_is_type
from tests import pyunit_utils
def h2o_H2OFrame_all():
"""
Python API test: h2o.frame.H2OFrame.all(), h2o.frame.H2OFrame.any(), h2o.frame.H2OFrame.anyfactor(),
h2o.frame.H2OFrame.isfactor()
"""
python_lists=[[True, False], [False, True], [True, True], [True, 'NA']]
h2oframe = h2o.H2OFrame(python_obj=python_lists, na_strings=['NA']) # contains true and false
    assert not(h2oframe.all()), "h2o.H2OFrame.all() command is not working." # frame contains False values, so all() is False
    assert h2oframe.any(), "h2o.H2OFrame.any() command is not working." # frame contains True values, so any() is True
assert h2oframe.anyfactor(), "h2o.H2OFrame.anyfactor() command is not working." # all columns are factors
clist = h2oframe.isfactor()
assert_is_type(clist, list) # check return type
assert sum(clist)==h2oframe.ncol, "h2o.H2OFrame.isfactor() command is not working." # check return result
if __name__ == "__main__":
pyunit_utils.standalone_test(h2o_H2OFrame_all)
else:
h2o_H2OFrame_all()
| apache-2.0 | -2,593,628,902,225,177,600 | 41.666667 | 110 | 0.686632 | false |
vikingco/django-kong-admin | tests/test_logic.py | 1 | 34925 | from __future__ import unicode_literals, print_function
import uuid
from django.test import TestCase
from kong_admin import models
from kong_admin import logic
from kong_admin.factory import get_kong_client
from kong_admin.enums import Plugins
from .factories import APIReferenceFactory, PluginConfigurationReferenceFactory, ConsumerReferenceFactory, \
BasicAuthReferenceFactory, KeyAuthReferenceFactory, OAuth2ReferenceFactory
from .fake import fake
from kong_admin.models import PluginConfigurationReference
class APIReferenceLogicTestCase(TestCase):
def setUp(self):
self.client = get_kong_client()
self._cleanup_api = []
def tearDown(self):
self.client.close()
for api_ref in self._cleanup_api:
self.assertTrue(isinstance(api_ref, models.APIReference))
api_ref = models.APIReference.objects.get(id=api_ref.id) # reloads!!
logic.withdraw_api(self.client, api_ref)
def test_sync_incomplete_api(self):
# Create incomplete api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Try to sync, expect an error
with self.assertRaises(ValueError):
logic.synchronize_api(self.client, api_ref)
self.assertFalse(api_ref.synchronized)
# Fix api_ref
api_ref.request_host = fake.domain_name()
api_ref.save()
# Sync again
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
def test_sync_api(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Sync
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
def test_sync_updated_api(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
self.assertEqual(result['name'], api_ref.request_host)
# Update
new_name = fake.api_name()
self.assertNotEqual(new_name, api_ref.name)
api_ref.name = new_name
api_ref.save()
# Publish
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
self.assertEqual(result['name'], new_name)
def test_withdraw_api(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Publish
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
# Store kong_id
kong_id = api_ref.kong_id
# You can delete afterwards
logic.withdraw_api(self.client, api_ref)
self.assertFalse(api_ref.synchronized)
# Check kong
with self.assertRaises(ValueError):
_ = self.client.apis.retrieve(kong_id)
def test_delete_api(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Publish
logic.synchronize_api(self.client, api_ref)
self.assertTrue(api_ref.synchronized)
# Check kong
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
self.assertEqual(result['request_host'], api_ref.request_host)
# You can delete afterwards
api_kong_id = api_ref.kong_id
api_ref.delete()
# Check kong
with self.assertRaises(ValueError):
_ = self.client.apis.retrieve(api_kong_id)
def test_sync_plugin_configuration_before_api(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Attempt to publish
with self.assertRaises(ValueError):
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
def test_sync_plugin_configuration_without_fields(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Check if remote upstream_url matches the locally known upstream_url
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref, config={})
# Attempt to publish
with self.assertRaises(ValueError):
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Make sure we did not get a Kong ID (meaning it did not sync to Kong)
self.assertIsNone(plugin_configuration_ref.kong_id)
def test_sync_plugin_configuration(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Check if remote upstream_url matches the locally known upstream_url
result = self.client.apis.retrieve(api_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['upstream_url'], api_ref.upstream_url)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Publish plugin_configuration
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
def test_withdraw_plugin_configuration(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Publish plugin_configuration
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Check if remote plugin name matches the locally known plugin
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
# Withdraw plugin_configuration
logic.withdraw_plugin_configuration(self.client, plugin_configuration_ref)
# Check
with self.assertRaises(ValueError):
_ = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
def test_delete_synchronized_plugin_configuration(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Publish plugin_configuration
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Check if remote plugin name matches the locally known plugin
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
# Delete plugin_configuration
plugin_configuration_kong_id = plugin_configuration_ref.kong_id
plugin_configuration_ref.delete()
# Check
with self.assertRaises(ValueError):
_ = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_kong_id)
def test_disable_synchronized_plugin_configuration(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Publish plugin_configuration
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Check if remote plugin name matches the locally known plugin, and that it is enabled
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
self.assertTrue(result['enabled'])
# Update plugin_configuration
logic.enable_plugin_configuration(self.client, plugin_configuration_ref, enabled=False)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
self.assertFalse(result['enabled'])
def test_update_synchronized_plugin_configuration(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Create plugin_configuration
plugin_configuration_ref = PluginConfigurationReferenceFactory(api=api_ref)
# Publish plugin_configuration
logic.synchronize_plugin_configuration(self.client, plugin_configuration_ref)
# Check if remote plugin name matches the locally known plugin, and that the configuration matches the locally
# known configuration
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
self.assertEqual(result['config']['second'], plugin_configuration_ref.config['second'])
# Update plugin_configuration
new_value = 5
self.assertNotEqual(new_value, plugin_configuration_ref.config['second'])
plugin_configuration_ref.config['second'] = new_value
plugin_configuration_ref.save()
logic.publish_plugin_configuration(self.client, plugin_configuration_ref)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
self.assertEqual(result['config']['second'], plugin_configuration_ref.config['second'])
def _cleanup_afterwards(self, api_ref):
self._cleanup_api.append(api_ref)
return api_ref
class ConsumerReferenceLogicTestCase(TestCase):
def setUp(self):
self.client = get_kong_client()
self._cleanup_consumers = []
def tearDown(self):
self.client.close()
for consumer_ref in self._cleanup_consumers:
self.assertTrue(isinstance(consumer_ref, models.ConsumerReference))
consumer_ref = models.ConsumerReference.objects.get(id=consumer_ref.id) # reloads!!
logic.withdraw_consumer(self.client, consumer_ref)
def test_incomplete_consumer(self):
# Create incomplete consumer_ref
consumer_ref = ConsumerReferenceFactory()
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Try to sync, expect an error
with self.assertRaises(ValueError):
logic.synchronize_consumer(self.client, consumer_ref)
self.assertFalse(consumer_ref.synchronized)
# Fix consumer_ref
consumer_ref.username = fake.consumer_name()
consumer_ref.save()
# Sync again
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the locally known username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], consumer_ref.username)
def test_sync_consumer(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Sync
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the locally known username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], consumer_ref.username)
def test_sync_updated_consumer(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the locally known username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], consumer_ref.username)
# Update
new_name = fake.consumer_name()
self.assertNotEqual(new_name, consumer_ref.username)
consumer_ref.username = new_name
consumer_ref.save()
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the new username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], new_name)
def test_withdraw_consumer(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the locally known username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], consumer_ref.username)
# Store kong_id
kong_id = consumer_ref.kong_id
# You can delete afterwards
logic.withdraw_consumer(self.client, consumer_ref)
self.assertFalse(consumer_ref.synchronized)
# Check kong
with self.assertRaises(ValueError):
_ = self.client.consumers.retrieve(kong_id)
def test_delete_consumer(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Make sure the remote username matches the locally known username
result = self.client.consumers.retrieve(consumer_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], consumer_ref.username)
# You can delete afterwards
consumer_kong_id = consumer_ref.kong_id
consumer_ref.delete()
# Check kong
with self.assertRaises(ValueError):
_ = self.client.consumers.retrieve(consumer_kong_id)
def test_sync_consumer_basic_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check kong
amount = self.client.consumers.basic_auth(consumer_ref.kong_id).count()
self.assertEqual(amount, 0)
# Create auth
auth_ref = BasicAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.BasicAuthReference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
# Make sure the remote username matches the locally known username
result = self.client.consumers.basic_auth(consumer_ref.kong_id).retrieve(auth_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['username'], auth_ref.username)
self.assertIsNotNone(result['password'])
def test_sync_consumer_multiple_basic_auth(self):
amount = 3
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Create auths
auths = []
for i in range(amount):
auths.append(BasicAuthReferenceFactory(consumer=consumer_ref))
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.basic_auth(consumer_ref.kong_id).count(), amount)
# Reload
for i in range(len(auths)):
auths[i] = models.BasicAuthReference.objects.get(id=auths[i].id)
self.assertIsNotNone(auths[i].kong_id)
# Check kong
result = self.client.consumers.basic_auth(consumer_ref.kong_id).list()
self.assertIsNotNone(result)
self.assertEqual(
sorted([(uuid.UUID(r['id']), r['username']) for r in result['data']], key=lambda x: x[0]),
sorted([(obj.kong_id, obj.username) for obj in auths], key=lambda x: x[0]))
def test_withdraw_consumer_basic_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref = BasicAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.BasicAuthReference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
self.assertTrue(auth_ref.synchronized)
# Withdraw
logic.withdraw_consumer(self.client, consumer_ref)
self.assertFalse(consumer_ref.synchronized)
# Reload
auth_ref = models.BasicAuthReference.objects.get(id=auth_ref.id)
self.assertIsNone(auth_ref.kong_id)
self.assertFalse(auth_ref.synchronized)
def test_delete_consumer_basic_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref1 = BasicAuthReferenceFactory(consumer=consumer_ref)
BasicAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.basic_auth(consumer_ref.kong_id).count(), 2)
# Delete auth_ref1
auth_ref1.delete()
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.basic_auth(consumer_ref.kong_id).count(), 1)
# Delete consumer
consumer_kong_id = consumer_ref.kong_id
consumer_ref.delete()
# Check
with self.assertRaises(ValueError):
self.client.consumers.basic_auth(consumer_kong_id).count()
def test_sync_consumer_key_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check kong
amount = self.client.consumers.key_auth(consumer_ref.kong_id).count()
self.assertEqual(amount, 0)
# Create auth
auth_ref = KeyAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.KeyAuthReference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
# Check kong
result = self.client.consumers.key_auth(consumer_ref.kong_id).retrieve(auth_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['key'], auth_ref.key)
def test_sync_consumer_multiple_key_auth(self):
amount = 3
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Create auths
auths = []
for i in range(amount):
auths.append(KeyAuthReferenceFactory(consumer=consumer_ref))
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.key_auth(consumer_ref.kong_id).count(), amount)
# Reload
for i in range(len(auths)):
auths[i] = models.KeyAuthReference.objects.get(id=auths[i].id)
self.assertIsNotNone(auths[i].kong_id)
# Check kong
result = self.client.consumers.key_auth(consumer_ref.kong_id).list()
self.assertIsNotNone(result)
self.assertEqual(
sorted([(uuid.UUID(r['id']), r['key']) for r in result['data']], key=lambda x: x[0]),
sorted([(obj.kong_id, obj.key) for obj in auths], key=lambda x: x[0]))
def test_withdraw_consumer_key_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref = KeyAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.KeyAuthReference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
self.assertTrue(auth_ref.synchronized)
# Withdraw
logic.withdraw_consumer(self.client, consumer_ref)
self.assertFalse(consumer_ref.synchronized)
# Reload
auth_ref = models.KeyAuthReference.objects.get(id=auth_ref.id)
self.assertIsNone(auth_ref.kong_id)
self.assertFalse(auth_ref.synchronized)
def test_delete_consumer_key_auth(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref1 = KeyAuthReferenceFactory(consumer=consumer_ref)
KeyAuthReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.key_auth(consumer_ref.kong_id).count(), 2)
# Delete auth_ref1
auth_ref1.delete()
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.key_auth(consumer_ref.kong_id).count(), 1)
# Delete consumer
consumer_kong_id = consumer_ref.kong_id
consumer_ref.delete()
# Check
with self.assertRaises(ValueError):
self.client.consumers.key_auth(consumer_kong_id).count()
def test_sync_consumer_oauth2(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check kong
amount = self.client.consumers.oauth2(consumer_ref.kong_id).count()
self.assertEqual(amount, 0)
# Create auth
auth_ref = OAuth2ReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.OAuth2Reference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
# Check kong
result = self.client.consumers.oauth2(consumer_ref.kong_id).retrieve(auth_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['client_id'], auth_ref.client_id)
def test_sync_consumer_multiple_oauth2(self):
amount = 3
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Mark for auto cleanup
self._cleanup_afterwards(consumer_ref)
# Create auths
auths = []
for i in range(amount):
auths.append(OAuth2ReferenceFactory(consumer=consumer_ref))
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.oauth2(consumer_ref.kong_id).count(), amount)
# Reload
for i in range(len(auths)):
auths[i] = models.OAuth2Reference.objects.get(id=auths[i].id)
self.assertIsNotNone(auths[i].kong_id)
# Check kong
result = self.client.consumers.oauth2(consumer_ref.kong_id).list()
self.assertIsNotNone(result)
self.assertEqual(
sorted([(uuid.UUID(r['id']), r['client_id']) for r in result['data']], key=lambda x: x[0]),
sorted([(obj.kong_id, obj.client_id) for obj in auths], key=lambda x: x[0]))
def test_withdraw_consumer_oauth2(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref = OAuth2ReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Reload
auth_ref = models.OAuth2Reference.objects.get(id=auth_ref.id)
self.assertIsNotNone(auth_ref.kong_id)
self.assertTrue(auth_ref.synchronized)
# Withdraw
logic.withdraw_consumer(self.client, consumer_ref)
self.assertFalse(consumer_ref.synchronized)
# Reload
auth_ref = models.OAuth2Reference.objects.get(id=auth_ref.id)
self.assertIsNone(auth_ref.kong_id)
self.assertFalse(auth_ref.synchronized)
def test_delete_consumer_oauth2(self):
# Create consumer_ref
consumer_ref = ConsumerReferenceFactory(username=fake.consumer_name())
# Create auth
auth_ref1 = OAuth2ReferenceFactory(consumer=consumer_ref)
OAuth2ReferenceFactory(consumer=consumer_ref)
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.oauth2(consumer_ref.kong_id).count(), 2)
# Delete auth_ref1
auth_ref1.delete()
# Publish
logic.synchronize_consumer(self.client, consumer_ref)
self.assertTrue(consumer_ref.synchronized)
# Check
self.assertEqual(self.client.consumers.oauth2(consumer_ref.kong_id).count(), 1)
# Delete consumer
consumer_kong_id = consumer_ref.kong_id
consumer_ref.delete()
# Check
with self.assertRaises(ValueError):
self.client.consumers.oauth2(consumer_kong_id).count()
def _cleanup_afterwards(self, consumer_ref):
self._cleanup_consumers.append(consumer_ref)
return consumer_ref
class AuthenticationPluginTestCase(TestCase):
def setUp(self):
self.client = get_kong_client()
self._cleanup_api = []
def tearDown(self):
self.client.close()
for api_ref in self._cleanup_api:
self.assertTrue(isinstance(api_ref, models.APIReference))
api_ref = models.APIReference.objects.get(id=api_ref.id) # reloads!!
logic.withdraw_api(self.client, api_ref)
def test_create_oauth2_plugin(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Create plugin_configuration_ref
plugin_configuration_ref = PluginConfigurationReferenceFactory(
api=api_ref, plugin=Plugins.OAUTH2_AUTHENTICATION, config={})
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Reload plugin configuration
plugin_configuration_ref = PluginConfigurationReference.objects.get(id=plugin_configuration_ref.id)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertIsNotNone(result)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
def test_create_oauth2_plugin_with_scopes(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Create plugin_configuration_ref
plugin_configuration_ref = PluginConfigurationReferenceFactory(
api=api_ref, plugin=Plugins.OAUTH2_AUTHENTICATION, config={
'scopes': 'email,subscriptions,topups'
})
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Reload plugin configuration
plugin_configuration_ref = PluginConfigurationReference.objects.get(id=plugin_configuration_ref.id)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertEqual(result['name'], Plugins.label(plugin_configuration_ref.plugin))
self.assertEqual(result['config']['scopes'], ['email', 'subscriptions', 'topups'])
def test_update_oauth2_plugin_with_scopes(self):
# Create api_ref
api_ref = APIReferenceFactory(upstream_url=fake.url(), request_host=fake.domain_name())
# Create plugin_configuration_ref
plugin_configuration_ref = PluginConfigurationReferenceFactory(
api=api_ref, plugin=Plugins.OAUTH2_AUTHENTICATION, config={})
# Mark for auto cleanup
self._cleanup_afterwards(api_ref)
# Publish api
logic.synchronize_api(self.client, api_ref)
# Reload plugin configuration
plugin_configuration_ref = PluginConfigurationReference.objects.get(id=plugin_configuration_ref.id)
# Update plugin_configuration_ref
plugin_configuration_ref.config = dict({
'scopes': 'email,subscriptions,topups'
}, **plugin_configuration_ref.config)
plugin_configuration_ref.save()
# Publish api
logic.synchronize_api(self.client, api_ref)
# Reload plugin configuration
plugin_configuration_ref = PluginConfigurationReference.objects.get(id=plugin_configuration_ref.id)
# Check
result = self.client.apis.plugins(api_ref.kong_id).retrieve(plugin_configuration_ref.kong_id)
self.assertEqual(result['config']['scopes'], ['email', 'subscriptions', 'topups'])
def _cleanup_afterwards(self, api_ref):
self._cleanup_api.append(api_ref)
return api_ref
| bsd-3-clause | 1,250,891,053,898,875,100 | 35.840717 | 118 | 0.665025 | false |
Scapogo/zipline | zipline/utils/serialization_utils.py | 2 | 3684 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import BytesIO
import pickle
from functools import partial
from zipline.assets import AssetFinder
from zipline.finance.trading import TradingEnvironment
# Label for the serialization version field in the state returned by
# __getstate__.
VERSION_LABEL = '_stateversion_'
CHECKSUM_KEY = '__state_checksum'
def _persistent_id(obj):
if isinstance(obj, AssetFinder):
return AssetFinder.PERSISTENT_TOKEN
if isinstance(obj, TradingEnvironment):
return TradingEnvironment.PERSISTENT_TOKEN
return None
def _persistent_load(persid, env):
if persid == AssetFinder.PERSISTENT_TOKEN:
return env.asset_finder
if persid == TradingEnvironment.PERSISTENT_TOKEN:
return env
def dumps_with_persistent_ids(obj, protocol=None):
"""
Performs a pickle dumps on the given object, substituting all references to
a TradingEnvironment or AssetFinder with tokenized representations.
All arguments are passed to pickle.Pickler and are described therein.
"""
file = BytesIO()
pickler = pickle.Pickler(file, protocol)
pickler.persistent_id = _persistent_id
pickler.dump(obj)
return file.getvalue()
def loads_with_persistent_ids(str, env):
"""
Performs a pickle loads on the given string, substituting the given
TradingEnvironment in to any tokenized representations of a
TradingEnvironment or AssetFinder.
Parameters
----------
str : String
The string representation of the object to be unpickled.
env : TradingEnvironment
The TradingEnvironment to be inserted to the unpickled object.
Returns
-------
obj
An unpickled object formed from the parameter 'str'.
"""
file = BytesIO(str)
unpickler = pickle.Unpickler(file)
unpickler.persistent_load = partial(_persistent_load, env=env)
return unpickler.load()
def load_context(state_file_path, context, checksum):
with open(state_file_path, 'rb') as f:
try:
loaded_state = pickle.load(f)
except (pickle.UnpicklingError, IndexError):
raise ValueError("Corrupt state file: {}".format(state_file_path))
else:
if CHECKSUM_KEY not in loaded_state or \
loaded_state[CHECKSUM_KEY] != checksum:
raise TypeError("Checksum mismatch during state load. "
"The given state file was not created "
"for the algorithm in use")
else:
del loaded_state[CHECKSUM_KEY]
for k, v in loaded_state.items():
setattr(context, k, v)
def store_context(state_file_path, context, checksum, exclude_list):
state = {}
fields_to_store = list(set(context.__dict__.keys()) -
set(exclude_list))
for field in fields_to_store:
state[field] = getattr(context, field)
state[CHECKSUM_KEY] = checksum
with open(state_file_path, 'wb') as f:
# Forcing v2 protocol for compatibility between py2 and py3
pickle.dump(state, f, protocol=2)
| apache-2.0 | 6,949,588,704,520,836,000 | 31.60177 | 79 | 0.671824 | false |
slaweet/autoskola | main/geography/views/question.py | 1 | 4283 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import Http404, HttpResponseBadRequest
from django.utils import simplejson
from geography.models import Place, PlaceRelation, UserPlace, AveragePlace, ABEnvironment
from geography.utils import JsonResponse, QuestionService
from lazysignup.decorators import allow_lazy_user
from logging import getLogger
from ipware.ip import get_ip
from geography.models.averageknowledge import AverageKnowledge
from math import exp
LOGGER = getLogger(__name__)
@allow_lazy_user
def question(request, map_code, place_type_slug):
try:
map = PlaceRelation.objects.get(
place__code=map_code,
type=PlaceRelation.IS_ON_MAP)
except PlaceRelation.DoesNotExist:
raise Http404
ABEnvironment.init_session(request.user, request.session)
qs = QuestionService(user=request.user, map_place=map, ab_env=ABEnvironment(request))
question_index = 0
if request.raw_post_data:
LOGGER.debug("processing raw answer %s", request.raw_post_data)
answer = simplejson.loads(request.raw_post_data)
qs.answer(answer, get_ip(request))
question_index = answer['index'] + 1
place_types = ([Place.PLACE_TYPE_SLUGS_LOWER_REVERSE[place_type_slug]]
if place_type_slug in Place.PLACE_TYPE_SLUGS_LOWER_REVERSE
else Place.CATEGORIES[place_type_slug]
if place_type_slug in Place.CATEGORIES
else [t[0] for t in Place.PLACE_TYPES])
if place_type_slug == 'test':
if question_index == 0:
response = qs.get_test()
else:
response = []
else:
response = qs.get_questions(10 - question_index, place_types)
return JsonResponse(response)
def average_users_places(request, map_code):
response = {}
return JsonResponse(response)
@allow_lazy_user
def users_places(request, map_code, user=None):
try:
map = PlaceRelation.objects.get(
place__code=map_code,
type=PlaceRelation.IS_ON_MAP)
map_places = map.related_places.all()
except PlaceRelation.DoesNotExist:
raise Http404("Unknown map name: {0}".format(map_code))
try:
too_small_places = PlaceRelation.objects.get(
place__code=map_code,
type=PlaceRelation.IS_TOO_SMALL_ON_MAP)
map_places = map_places | too_small_places.related_places.all()
except PlaceRelation.DoesNotExist:
pass
if not user:
user = request.user
elif user == "average":
pass
else:
try:
user = User.objects.get(username=user)
except User.DoesNotExist:
raise HttpResponseBadRequest("Invalid username: {0}" % user)
if user == "average":
ps = AveragePlace.objects.for_map(map_places)
elif request.user.is_authenticated():
ps = UserPlace.objects.for_user_and_map_prepared(user, map)
else:
ps = []
ps = list(ps)
response = {
'expectedPoints': expectedPoints(user, map),
'name': map.place.name,
'placesTypes': [
{
'name': place_type[1],
'slug': Place.PLACE_TYPE_SLUGS_LOWER[place_type[0]],
'countInTest': Place.TEST_COMPOSITION[place_type[0]][1],
'pointsInTest': Place.TEST_COMPOSITION[place_type[0]][2],
'places': [p.to_serializable() for p in ps
if hasattr(p, 'type') and p.type == place_type[0] or
not hasattr(p, 'type') and p.place.type == place_type[0]]
} for place_type in Place.PLACE_TYPE_PLURALS
]
}
response['placesTypes'] = [pt for pt in response['placesTypes']
if len(pt['places']) > 0]
LOGGER.info(
u"users_places: previewed map '{0}' of user '{1}' with '{2}' places".
format(map.place.name, user, len(list(ps))))
return JsonResponse(response)
def expectedPoints(user, map):
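    """Estimate the number of points the user would score on this map's test.

    For each place type, the expected contribution is
    (questions of that type in the test) * (points per question) * P(correct),
    where P(correct) is modelled as a logistic function of the user's average
    skill for that type.
    """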
aks = AverageKnowledge.objects.for_user_and_map_prepared(user, map)
points = 0
for ak in aks:
tc = Place.TEST_COMPOSITION[ak.type]
points += tc[1] * tc[2] * 1.0 / (1 + exp(-ak.skill))
return round(points)
| mit | -2,900,680,679,705,237,000 | 35.606838 | 89 | 0.618959 | false |
wgwoods/anaconda | pyanaconda/threads.py | 3 | 9223 | #
# threads.py: anaconda thread management
#
# Copyright (C) 2012
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Chris Lumens <[email protected]>
#
import logging
log = logging.getLogger("anaconda")
import threading
_WORKER_THREAD_PREFIX = "AnaWorkerThread"
class ThreadManager(object):
"""A singleton class for managing threads and processes.
Notes:
THE INSTANCE HAS TO BE CREATED IN THE MAIN THREAD!
This manager makes one assumption that contradicts python's
threading module documentation. In this class, we assume that thread
names are unique and meaningful. This is an okay assumption for us
to make given that anaconda is only ever going to have a handful of
special purpose threads.
"""
def __init__(self):
self._objs = {}
self._objs_lock = threading.RLock()
self._errors = {}
self._errors_lock = threading.RLock()
self._main_thread = threading.current_thread()
def __call__(self):
return self
def add(self, obj):
"""Given a Thread or Process object, add it to the list of known objects
and start it. It is assumed that obj.name is unique and descriptive.
"""
# we need to lock the thread dictionary when adding a new thread,
# so that callers can't get & join threads that are not yet started
with self._objs_lock:
if obj.name in self._objs:
                raise KeyError("Cannot add thread '%s', a thread with the same name is already running" % obj.name)
self._objs[obj.name] = obj
obj.start()
return obj.name
def remove(self, name):
"""Removes a thread from the list of known objects. This should only
be called when a thread exits, or there will be no way to get a
handle on it.
"""
with self._objs_lock:
self._objs.pop(name)
def exists(self, name):
"""Determine if a thread or process exists with the given name."""
# thread in the ThreadManager only officially exists once started
with self._objs_lock:
return name in self._objs
def get(self, name):
"""Given an object name, see if it exists and return the object.
Return None if no such object exists. Additionally, this method
will re-raise any uncaught exception in the thread.
"""
# without the lock it would be possible to get & join
# a thread that was not yet started
with self._objs_lock:
obj = self._objs.get(name)
if obj:
self.raise_if_error(name)
return obj
def wait(self, name):
"""Wait for the thread to exit and if the thread exited with an error
re-raise it here.
"""
ret_val = True
# we don't need a lock here,
# because get() acquires it itself
try:
self.get(name).join()
except AttributeError:
ret_val = False
# - if there is a thread object for the given name,
# we join it
# - if there is not a thread object for the given name,
# we get None, try to join it, suppress the AttributeError
# and return immediately
self.raise_if_error(name)
# return True if we waited for the thread, False otherwise
return ret_val
def wait_all(self):
"""Wait for all threads to exit and if there was an error re-raise it.
"""
with self._objs_lock:
names = list(self._objs.keys())
for name in names:
if self.get(name) == threading.current_thread():
continue
log.debug("Waiting for thread %s to exit", name)
self.wait(name)
if self.any_errors:
with self._errors_lock:
thread_names = ", ".join(thread_name for thread_name in self._errors.keys()
if self._errors[thread_name])
msg = "Unhandled errors from the following threads detected: %s" % thread_names
raise RuntimeError(msg)
def set_error(self, name, *exc_info):
"""Set the error data for a thread
The exception data is expected to be the tuple from sys.exc_info()
"""
with self._errors_lock:
self._errors[name] = exc_info
def get_error(self, name):
"""Get the error data for a thread using its name
"""
return self._errors.get(name)
@property
def any_errors(self):
"""Return True of there have been any errors in any threads
"""
with self._errors_lock:
return any(self._errors.values())
def raise_if_error(self, name):
"""If a thread has failed due to an exception, raise it into the main
thread and remove it from errors.
"""
if name not in self._errors:
# no errors found for the thread
return
with self._errors_lock:
exc_info = self._errors.pop(name)
if exc_info:
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
def in_main_thread(self):
"""Return True if it is run in the main thread."""
cur_thread = threading.current_thread()
return cur_thread is self._main_thread
@property
def running(self):
""" Return the number of running threads.
:returns: number of running threads
:rtype: int
"""
with self._objs_lock:
return len(self._objs)
@property
def names(self):
""" Return the names of the running threads.
:returns: list of thread names
:rtype: list of strings
"""
with self._objs_lock:
return list(self._objs.keys())
def wait_for_error_threads(self):
"""
Waits for all threads that caused exceptions. In other words, waits for
exception handling (possibly interactive) to be finished.
"""
with self._errors_lock:
for thread_name in self._errors.keys():
thread = self._objs[thread_name]
thread.join()
class AnacondaThread(threading.Thread):
"""A threading.Thread subclass that exists only for a couple purposes:
(1) Make exceptions that happen in a thread invoke our exception handling
code as well. Otherwise, threads will silently die and we are doing
a lot of complicated code in them now.
(2) Remove themselves from the thread manager when completed.
(3) All created threads are made daemonic, which means anaconda will quit
when the main process is killed.
"""
# class-wide dictionary ensuring unique thread names
_prefix_thread_counts = dict()
def __init__(self, *args, **kwargs):
# if neither name nor prefix is given, use the worker prefix
if "name" not in kwargs and "prefix" not in kwargs:
kwargs["prefix"] = _WORKER_THREAD_PREFIX
# if prefix is specified, use it to construct new thread name
prefix = kwargs.pop("prefix", None)
if prefix:
thread_num = self._prefix_thread_counts.get(prefix, 0) + 1
self._prefix_thread_counts[prefix] = thread_num
kwargs["name"] = prefix + str(thread_num)
if "fatal" in kwargs:
self._fatal = kwargs.pop("fatal")
else:
self._fatal = True
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
def run(self, *args, **kwargs):
# http://bugs.python.org/issue1230540#msg25696
import sys
log.info("Running Thread: %s (%s)", self.name, self.ident)
try:
threading.Thread.run(self, *args, **kwargs)
# pylint: disable=bare-except
except:
if self._fatal:
sys.excepthook(*sys.exc_info())
else:
threadMgr.set_error(self.name, *sys.exc_info())
finally:
threadMgr.remove(self.name)
log.info("Thread Done: %s (%s)", self.name, self.ident)
def initThreading():
"""Set up threading for anaconda's use. This method must be called before
any GTK or threading code is called, or else threads will only run when
an event is triggered in the GTK main loop. And IT HAS TO BE CALLED IN
THE MAIN THREAD.
"""
global threadMgr
threadMgr = ThreadManager()
threadMgr = None
| gpl-2.0 | 6,029,410,370,755,598,000 | 32.783883 | 112 | 0.600564 | false |
cjPrograms/miniQuest | miniQuest.py | 1 | 19604 | import cmd
import textwrap
import cave
# miniQuest Text - Written by Cody Cooper with the help of Al Sweigart (http://inventwithpython.com)
DESC = 'desc'
NORTH = 'north'
SOUTH = 'south'
EAST = 'east'
WEST = 'west'
CHESTS = 'chests'
CHESTDESC = 'chestdesc'
SHORTDESC = 'shortdesc'
LONGDESC = 'longdesc'
TAKEABLE = 'takeable'
EDIBLE = 'edible'
DESC_WORDS = 'desc_words'
SHOP = 'shop'
BUYPRICE = 'buyprice'
SELLPRICE = 'sellprice'
SCREEN_WIDTH = 80
world_rooms = {
'Your House': {
DESC: 'You live here! A small, cozy house. Your front door takes you right in to the market!',
NORTH: 'Market',
CHESTS: ['Health Potion']},
'Market': {
DESC: 'The market is bustling!',
NORTH: 'Cave Entrance',
EAST: 'Blacksmith',
SOUTH: 'Your House',
WEST: 'Apothecary',
CHESTS: []},
'Cave Entrance': {
DESC: 'The entrance to the cave!',
SOUTH: 'Market',
CHESTS: []},
'Blacksmith': {
DESC: 'The blacksmith! Buy weapons and armor here.',
WEST: 'Market',
SHOP: ['Health Potion'],
CHESTS: []},
'Apothecary': {
DESC: 'The apothecary! Buy potions here.',
EAST: 'Market',
SHOP: ['Health Potion'],
CHESTS: []},
}
world_items = {
'Health Potion': {
CHESTDESC: 'A health potion! Drink to restore your health!',
SHORTDESC: 'a health potion',
LONGDESC: 'A health potion! Type drink health to use!',
EDIBLE: True,
DESC_WORDS: ['health', 'potion', 'health potion'],
BUYPRICE: 25,
SELLPRICE: 5},
}
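# A minimal sketch of how a further item could be declared with the same schema
# (the "Rusty Sword" entry is purely hypothetical and not used by the game data above):
#
#     world_items['Rusty Sword'] = {
#         CHESTDESC: 'A rusty sword! Better than nothing.',
#         SHORTDESC: 'a rusty sword',
#         LONGDESC: 'A rusty sword. It has seen better days.',
#         EDIBLE: False,
#         DESC_WORDS: ['sword', 'rusty', 'rusty sword'],
#         BUYPRICE: 50,
#         SELLPRICE: 10}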
location = 'Your House' # Start in your house
inventory = [] # Start with nothing in inventory
show_full_exits = True
def display_location(loc):
"""A helper function for displaying an area's description and exits."""
# Print location
print(loc)
print('~' * len(loc))
# Print the location's description using textwrap
print('\n'.join(textwrap.wrap(world_rooms[loc][DESC], SCREEN_WIDTH)))
# Print all chests and list what items are inside of them
if len(world_rooms[loc][CHESTS]) > 0:
print()
for item in world_rooms[loc][CHESTS]:
print('There\'s a chest with', world_items[item][SHORTDESC], 'inside of it!')
# Print all the exits
exits = []
for direction in (NORTH, SOUTH, EAST, WEST):
if direction in world_rooms[loc].keys():
exits.append(direction.title())
print()
if show_full_exits:
for direction in (NORTH, SOUTH, EAST, WEST):
if direction in world_rooms[location]:
print('%s: %s' % (direction.title(), world_rooms[location][direction]))
else:
print('Exits: %s' % ' '.join(exits))
def move_direction(direction):
"""A helper function that changes the location of the player."""
global location
if direction in world_rooms[location]:
print('You move to the %s.' % direction)
location = world_rooms[location][direction]
display_location(location)
else:
print('You cannot move in that direction')
def get_all_desc_words(item_list):
"""Returns a list of 'description words' for each item named in item_list."""
item_list = list(set(item_list)) # Make item_list unique
desc_words = []
for item in item_list:
desc_words.extend(world_items[item][DESC_WORDS])
return list(set(desc_words))
def get_all_first_desc_words(item_list):
"""Returns a list of the first 'description word' in the list of
description words for each item named in item_list"""
item_list = list(set(item_list)) # Make item_list unique
desc_words = []
for item in item_list:
desc_words.append(world_items[item][DESC_WORDS][0])
return list(set(desc_words))
def get_first_item_matching_desc(desc, item_list):
item_list = list(set(item_list)) # Make item_list unique
for item in item_list:
if desc in world_items[item][DESC_WORDS]:
return item
return None
def get_all_items_matching_desc(desc, item_list):
item_list = list(set(item_list)) # Make item_list unique
matching_items = []
for item in item_list:
if desc in world_items[item][DESC_WORDS]:
matching_items.append(item)
return matching_items
class TextAdventureCmd(cmd.Cmd):
prompt = '\n> '
# The default() method is called when none of the other do_*() command methods match.
def default(self, arg):
print('I do not understand that command. Type "help" for a list of commands.')
# A very simple "quit" command to terminate the program:
def do_quit(self, arg):
"""Quit th game"""
return True # This exits the cmd application loop in TextAdventureCmd.cmdloop()
def help_combat(self):
print('Not yet implemented')
def do_north(self, arg):
"""Move north"""
move_direction('north')
def do_south(self, arg):
"""Move south"""
move_direction('south')
def do_east(self, arg):
"""Move south"""
move_direction('east')
def do_west(self, arg):
"""move west"""
move_direction('west')
do_n = do_north
do_s = do_south
do_e = do_east
do_w = do_west
def do_exits(self, arg):
"""Toggle showing full exit descriptions or brief exit descriptions."""
global show_full_exits
show_full_exits = not show_full_exits
if show_full_exits:
print('Showing full exit descriptions.')
else:
print('Showing brief exit descriptions')
def do_inventory(self, arg):
"""Display a list of the items in your possession"""
if len(inventory) == 0:
print('Inventory:\n (nothing)')
return
# first get a count of each distinct item in the inventory
item_count = {}
for item in inventory:
if item in item_count.keys():
item_count[item] += 1
else:
item_count[item] = 1
# get a list of inventory items with duplicates removed:
print('Inventory:')
for item in set(inventory):
if item_count[item] > 1:
print(' %s (%s)' % (item, item_count[item]))
else:
print(' ' + item)
do_inv = do_inventory
def do_take(self, arg):
"""Take <item> - take an item from a chest."""
# Put this value in a more suitably named variable
item_to_take = arg.lower()
if item_to_take == '':
            print('Take what?')
return
cant_take = False
# Get the item name that the player's command describes
for item in get_all_items_matching_desc(item_to_take, world_rooms[location][CHESTS]):
if not world_items[item].get(TAKEABLE, True):
cant_take = True
continue # There may be other items named this that you can take, so continue checking.
print('You take %s.' % (world_items[item][SHORTDESC]))
world_rooms[location][CHESTS].remove(item) # Remove from the ground
inventory.append(item) # add to inventory
return
if cant_take:
print('You can\'t take "%s".' % item_to_take)
else:
print('That is not in the chest.')
def do_drop(self, arg):
"""drop <item> - drop an item from your inventory on to the ground."""
# put this value in a more suitably named variable
item_to_drop = arg.lower()
# get a list of all "Description words" for each item in the inventory
inv_desc_words = get_all_desc_words(inventory)
# find out if the player doesn't have that item
if item_to_drop not in inv_desc_words:
print('You do not have "%s" in your inventory.' % item_to_drop)
return
# get the item name that the player's command describes
item = get_first_item_matching_desc(item_to_drop, inventory)
if item is not None:
print('You drop %s.' % (world_items[item][SHORTDESC]))
inventory.remove(item) # remove from inventory
world_rooms[location][CHESTS].append(item) # add to the ground
def complete_take(self, text, line, begidx, endidx):
possible_items = []
text = text.lower()
# if the user has only typed 'take' but no item name:
if not text:
return get_all_first_desc_words(world_rooms[location][CHESTS])
# otherwise, get a list of description words for ground items matching the command text so far.
for item in list(set(world_rooms[location][CHESTS])):
for desc_word in world_items[item][DESC_WORDS]:
if desc_word.startswith(text) and world_items[item].get(TAKEABLE, True):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique
def complete_drop(self, text, line, begidx, endidx):
possible_items = []
item_to_drop = text.lower()
# get a list of all description words for each item in the inventory
inv_desc_words = get_all_desc_words(inventory)
for desc_word in inv_desc_words:
if line.startswith('drop %s' % desc_word):
return [] # command is complete.
# if the use has only typed drop but no item name:
if item_to_drop == '':
return get_all_first_desc_words(inventory)
        # otherwise, get a list of all 'description words' for inventory items matching the command text so far.
for desc_word in inv_desc_words:
if desc_word.startswith(text):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique.
def do_look(self, arg):
"""Look at an item, description, or the area:
'look' - display the current area's description
        'look <direction>' - display the description of the area in that direction
        'look exits' - display the description of all adjacent areas
        'look <item>' - display the description of an item in a chest or in your inventory"""
looking_at = arg.lower()
if looking_at == '':
# look will re print the area description
display_location(location)
return
if looking_at == 'exits':
for direction in (NORTH, SOUTH, EAST, WEST):
if direction in world_rooms[location]:
print('%s: %s' % (direction.title(), world_rooms[location][direction]))
return
if looking_at in ('north', 'west', 'east', 'south', 'n', 'e', 's', 'w'):
if looking_at.startswith('n') and NORTH in world_rooms[location]:
print(world_rooms[location][NORTH])
elif looking_at.startswith('w') and WEST in world_rooms[location]:
print(world_rooms[location][WEST])
elif looking_at.startswith('e') and EAST in world_rooms[location]:
print(world_rooms[location][EAST])
elif looking_at.startswith('s') and SOUTH in world_rooms[location]:
print(world_rooms[location][SOUTH])
else:
print('There is nothing in that direction.')
return
# see if the item being looked at is on the ground at this location
item = get_first_item_matching_desc(looking_at, world_rooms[location][CHESTS])
if item is not None:
print('\n'.join(textwrap.wrap(world_items[item][LONGDESC], SCREEN_WIDTH)))
return
# see if the item being looked at is in the inventory
item = get_first_item_matching_desc(looking_at, inventory)
if item is not None:
print('\n'.join(textwrap.wrap(world_items[item][LONGDESC], SCREEN_WIDTH)))
return
print('You do not see that nearby.')
def complete_look(self, text, line, begidx, endidx):
possible_items = []
looking_at = text.lower()
# get a list of all description words for each item in the inventory.
inv_desc_words = get_all_desc_words(inventory)
ground_desc_words = get_all_desc_words(world_rooms[location][CHESTS])
shop_desc_words = get_all_desc_words(world_rooms[location].get(SHOP, []))
for desc_word in inv_desc_words + ground_desc_words + shop_desc_words + [NORTH, SOUTH, EAST, WEST]:
if line.startswith('look %s' % desc_word):
return [] # command is complete
# if the user has only typed "look" but no item name, show all items in chest, shop, and directions
if looking_at == '':
possible_items.extend(get_all_first_desc_words(world_rooms[location][CHESTS]))
possible_items.extend(get_all_first_desc_words(world_rooms[location].get(SHOP, [])))
for direction in (NORTH, SOUTH, EAST, WEST):
if direction in world_rooms[location]:
possible_items.append(direction)
return list(set(possible_items)) # make list unique
# otherwise, get a list of all description words for ground items matching the command text so far.
for desc_word in ground_desc_words:
if desc_word.startswith(looking_at):
possible_items.append(desc_word)
# otherwise, get a list of all description words for items for sale at the shop ( if this is one )
for desc_word in shop_desc_words:
if desc_word.startswith(looking_at):
possible_items.append(desc_word)
# check for matching directions
for direction in (NORTH, SOUTH, EAST, WEST):
if direction.startswith(looking_at):
possible_items.append(direction)
# get a list of all description words for inventory items matching the command text so far
for desc_word in inv_desc_words:
if desc_word.startswith(looking_at):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique
def do_list(self, arg):
"""List the items for sale at the current location's shop. "list full" will show details of the items."""
if SHOP not in world_rooms[location]:
print('This is not a shop.')
return
arg = arg.lower()
print('For sale:')
for item in world_rooms[location][SHOP]:
print(' - %s' % item)
if arg == 'full':
print('\n'.join(textwrap.wrap(world_items[item][LONGDESC], SCREEN_WIDTH)))
def do_buy(self, arg):
"""buy <item> - buy an item at the current location's shop."""
if SHOP not in world_rooms[location]:
print('This is not a shop.')
return
item_to_buy = arg.lower()
if item_to_buy == '':
print('Buy what? Type "list" or "list full" to see a list of items for sale.')
return
item = get_first_item_matching_desc(item_to_buy, world_rooms[location][SHOP])
        if item is not None:
# Check gold/remove gold here.
print('You have purchased %s' % (world_items[item][SHORTDESC]))
inventory.append(item)
return
print('"%s" is not sold here. Type "list" or "list full" to see a list of items for sale.' % item_to_buy)
def complete_buy(self, text, line, begidx, endidx):
if SHOP not in world_rooms[location]:
return []
item_to_buy = text.lower()
possible_items = []
# if the user has only typed "buy" but no item name:
if not item_to_buy:
return get_all_first_desc_words(world_rooms[location][SHOP])
# otherwise, get a list of all "description words" for shop items matching the command text so far:
for item in list(set(world_rooms[location][SHOP])):
for desc_word in world_items[item][DESC_WORDS]:
if desc_word.startswith(text):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique
def do_sell(self, arg):
""""sell <item>" - sell an item at the current location's shop."""
if SHOP not in world_rooms[location]:
print('This is not a shop.')
return
item_to_sell = arg.lower()
if item_to_sell == '':
print('Sell what? Type "inventory" or "inv" to see your inventory.')
return
for item in inventory:
if item_to_sell in world_items[item][DESC_WORDS]:
# implement check price/add gold here
print('You have sold %s' % (world_items[item][SHORTDESC]))
inventory.remove(item)
return
print('You do not have "%s". Type "inventory" or "inv" to see your inventory.' % item_to_sell)
def complete_sell(self, text, line, begidx, endidx):
if SHOP not in world_rooms[location]:
return []
item_to_sell = text.lower()
possible_items = []
# if the user has only typed "sell" but no item name:
if not item_to_sell:
return get_all_first_desc_words(inventory)
# otherwise, get a list of all "description words" for inventory items matching the command text so far:
for item in list(set(inventory)):
for desc_word in world_items[item][DESC_WORDS]:
if desc_word.startswith(text):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique
def do_drink(self, arg):
""""drink <item>" - drink an item in your inventory."""
item_to_drink = arg.lower()
if item_to_drink == '':
            print('Drink what? Type "inventory" or "inv" to see your inventory.')
return
cant_drink = False
for item in get_all_items_matching_desc(item_to_drink, inventory):
if not world_items[item].get(EDIBLE, False):
cant_drink = True
continue
# check food/add health here
print('You drink %s' % (world_items[item][SHORTDESC]))
inventory.remove(item)
return
if cant_drink:
print('You cannot drink that.')
else:
print('You do not have "%s". Type "inventory" or "inv" to see your inventory.' % item_to_drink)
def complete_drink(self, text, line, begidx, endidx):
item_to_drink = text.lower()
possible_items = []
# if the user has only typed "drink" but no item name:
if item_to_drink == '':
return get_all_first_desc_words(inventory)
# otherwise, get a list of all "description words" for edible inventory items matching the command text so far:
for item in list(set(inventory)):
for desc_word in world_items[item][DESC_WORDS]:
if desc_word.startswith(text) and world_items[item].get(EDIBLE, False):
possible_items.append(desc_word)
return list(set(possible_items)) # make list unique
def do_enter(self, args):
if location == 'Cave Entrance':
cave.enter_cave()
else:
return
if __name__ == '__main__':
print('Mini Quest Demo!')
print('================')
print()
print('(Type "help" for commands.)')
print()
display_location(location)
TextAdventureCmd().cmdloop()
print('Thanks for playing!')
| gpl-2.0 | 6,900,770,017,640,478,000 | 35.036765 | 119 | 0.588961 | false |
thatch45/maras | setup.py | 1 | 1205 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import python libs
import os
import sys
# Import maras libs
import maras
if 'USE_SETUPTOOLS' in os.environ or 'setuptools' in sys.modules:
from setuptools import setup
else:
from distutils.core import setup
NAME = 'maras'
DESC = ('Pure python distributed database (engine first)')
VERSION = maras.version
setup(name=NAME,
version=VERSION,
description=DESC,
author='Thomas S Hatch',
author_email='[email protected]',
url='https://github.com/thatch45/maras',
classifiers=[
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Development Status :: 2 - Pre-Alpha',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Intended Audience :: Developers',
],
packages=[
'maras',
'maras.index',
'maras.stor',
'maras.utils',
]
)
| apache-2.0 | -1,446,448,607,595,010,000 | 27.690476 | 65 | 0.587552 | false |
conejoninja/plugin.video.pelisalacarta | pelisalacarta/channels/newhd.py | 6 | 12293 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for newhd
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "newhd"
__category__ = "F"
__type__ = "generic"
__title__ = "NewHD"
__language__ = "ES"
__creationdate__ = "20110505"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[newhd.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Español", action="idioma", url="http://www.newhd.org/"))
itemlist.append( Item(channel=__channel__, title="Inglés", action="idioma", url="http://www.newhd.org/en/"))
itemlist.append( Item(channel=__channel__, title="Latino", action="idioma", url="http://www.newhd.org/lat/"))
itemlist.append( Item(channel=__channel__, title="VOS", action="novedades", url="http://www.newhd.org/sub/"))
itemlist.append( Item(channel=__channel__, title="Buscar" ,action="search" , url="http://www.newhd.org/"))
return itemlist
def idioma(item):
logger.info("[newhd.py] idioma")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Novedades", action="novedades", url=item.url+"index.php?do=cat&category=online"))
itemlist.append( Item(channel=__channel__, title="Listado Alfabético", action="alfa", url=item.url+"index.php?do=cat&category=online"))
itemlist.append( Item(channel=__channel__, title="Listado por Categorías", action="cat", url=item.url+"index.php?do=cat&category=online"))
return itemlist
def search(item,texto):
logger.info("[newhd.py] search")
itemlist = []
item.url = "http://www.newhd.org/index.php?do=search&subaction=search&story=%s" %texto
itemlist.extend(searchlist(item))
return itemlist
def novedades(item):
logger.info("[newhd.py] novedades")
    # Download the page
data = scrapertools.cachePage(item.url)
    # Extract the entries
'''
<table width="100%" border="0" cellspacing="0" cellpadding="0" style="background-color:#ffffff;cursor:pointer;"
id="9111"
onmouseover="colorFade('9111','background','ffffff','eff6f9')"
onmouseout="colorFade('9111','background','eff6f9','ffffff',25,50)">
<tr valign="middle">
<td width="1%" class="box" bgcolor="#FFFFFF"><div onClick="desplegarContraer('911',this);" class="linkContraido"><img src="/templates/newhd/images/mas.png" border="0"></div></td>
<td width="85%" height="100%" class="box"><div onClick="desplegarContraer('911',this);" class="linkContraido"> <font color="#83a0ba"><a>Salvar al soldado Ryan</a></font> </div></td>
<td width="14%" align="right"><div align="right"><a href="http://www.newhd.org/online/online-belico/911-salvar-al-soldado-ryan.html"><img src="/templates/newhd/images/completo.png" onMouseOver="this.src='/templates/newhd/images/completoon.png';" onMouseOut="this.src='/templates/newhd/images/completo.png';" width="129" height="15" border="0"/></a></div></td>
</tr>
<td height="1" colspan="4" background="/templates/newhd/images/dotted.gif"><img src="/templates/newhd/images/spacer.gif" width="1" height="1" /></td>
</tr>
</table>
<div id="911" class='elementoOculto'><table width="100%" class="box"><br><tr>
<td width="14%" rowspan="6" align="left" valign="top"><img src="/uploads/thumbs/1319662843_salvar_al_soldado_ryan-738956437-large.jpg" width="112" height="154" border="0" align="top" /></td>
<td height="122" colspan="4" valign="top"><div id="news-id-911" style="display:inline;">Durante la invasión de Normandía, en plena Segunda Guerra Mundial, a un grupo de soldados americanos se le encomienda una peligrosa misión: poner a salvo al soldado James Ryan. Los hombres de la patrulla del capitán John Miller deben arriesgar sus vidas para encontrar a este soldado, cuyos tres hermanos han muerto en la guerra. Lo único que se sabe del soldado Ryan es que se lanzó con su escuadrón de paracaidistas detrás de las líneas enemigas.</div><font style="text-transform: uppercase;"> </font></td>
<tr>
<tr>
<td height="20" valign="bottom" class="rating"><img src="/templates/newhd/images/floder.gif" width="20" height="16" align="absbottom" /> Category: <font style="text-transform: uppercase;"><a href="http://www.newhd.org/online/">HD Online</a> » <a href="http://www.newhd.org/online/online-belico/">Belico</a></font></td>
<td align="right" valign="bottom"> <a href="http://nowtrailer.tv/view/1060/Saving-Private-Ryan-1998-Official-Trailer.html" target="_blank"><img src="/templates/newhd/images/trailer.gif" alt="Trailer" width="37" height="15" border="0"></a> </td>
<tr>
<td height="1" background="/templates/newhd/images/dot_dark.gif"></td>
<td height="1" background="/templates/newhd/images/dot_dark.gif"></td>
<tr>
<td width="73%" height="20" valign="bottom" class="rating"><div id='ratig-layer-911'><div class="rating" style="float:left;">
<ul class="unit-rating">
<li class="current-rating" style="width:0px;">0</li>
<li><a href="#" title="Bad" class="r1-unit" onclick="dleRate('1', '911'); return false;">1</a></li>
<li><a href="#" title="Poor" class="r2-unit" onclick="dleRate('2', '911'); return false;">2</a></li>
<li><a href="#" title="Fair" class="r3-unit" onclick="dleRate('3', '911'); return false;">3</a></li>
<li><a href="#" title="Good" class="r4-unit" onclick="dleRate('4', '911'); return false;">4</a></li>
<li><a href="#" title="Excellent" class="r5-unit" onclick="dleRate('5', '911'); return false;">5</a></li>
</ul>
</div>
patron = '<table width="100\%" border="0" cellspacing="0" cellpadding="0".*?'
patron += '<font[^<]+<a>([^<]+)</a>.*?'
patron += '<a href="(http://www.newhd.org/online/[^"]+)"><img.*?<img.*?'
patron += '<img src="([^"]+)".*?'
patron += '<div id="news-id[^"]+" style="display\:inline\;">([^<]+)<'
'''
patron = '<tr valign="middle">(.*?)</a></font></td>'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
try:
scrapedurl = re.compile(r'href="(.+?)"').findall(match)[0]
except:continue
try:
scrapedtitle = re.compile(r'<a>(.+?)</a>').findall(match)[0]
except:
scrapedtitle = "untitle"
try:
scrapedthumbnail = urlparse.urljoin(item.url,re.compile(r'html"><img src="([^"]+)" width=').findall(match)[0])
except:
scrapedthumbnail = ""
try:
scrapedplot = re.compile(r'(<td height="122".+?)<').findall(match)[0]
scrapedplot = re.sub("<[^>]+>"," ",scrapedplot).strip()
except:
scrapedplot = ""
logger.info(scrapedtitle)
        # Add to the list
itemlist.append( Item(channel=__channel__, action="videos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot ,context='4', folder=True) )
    # Extract the next-page marker
patronvideos = '<a href="([^"]+)"><span class="thide pnext">Next</span>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedtitle = "Página siguiente"
scrapedurl = urlparse.urljoin(item.url,matches[0])
scrapedthumbnail = ""
itemlist.append( Item( channel=__channel__ , title=scrapedtitle , action="novedades" , url=scrapedurl , thumbnail=scrapedthumbnail, folder=True ) )
return itemlist
def videos(item):
logger.info("[newhd.py] videos")
    # Download the page
data = scrapertools.cachePage(item.url)
title= item.title
scrapedthumbnail = item.thumbnail
scrapedplot = item.plot
listavideos = servertools.findvideos(data)
itemlist = []
for video in listavideos:
scrapedtitle = title.strip() + " - " + video[0]
videourl = video[1]
server = video[2]
#logger.info("videotitle="+urllib.quote_plus( videotitle ))
#logger.info("plot="+urllib.quote_plus( plot ))
#plot = ""
#logger.info("title="+urllib.quote_plus( title ))
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"], thumbnail=["+scrapedthumbnail+"]")
        # Add to the XBMC list
itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , url=videourl , thumbnail=scrapedthumbnail , plot=scrapedplot , server=server , folder=False) )
return itemlist
def alfa(item):
logger.info("[newhd.py] alfa")
    # Download the page
data = scrapertools.cachePage(item.url)
    # Extract the entries
patronvideos = '<a href="([^"]+)" class="blue">(.)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
scrapedurl = urlparse.urljoin(item.url,match[0])
scrapedurl = scrapedurl.replace("&","&")
scrapedtitle = match[1]
scrapedthumbnail = ""
scrapedplot = ""
logger.info(scrapedtitle)
        # Add to the list
itemlist.append( Item(channel=__channel__, action="novedades", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def cat(item):
logger.info("[newhd.py] cat")
    # Download the page
data = scrapertools.cachePage(item.url)
    # Extract the entries
patronvideos = '<a title="([^"]+)" href="(/index.php\?do\=cat\&category\=[^"]+)">'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
scrapedurl = urlparse.urljoin(item.url,match[1])
scrapedtitle = match[0]
scrapedthumbnail = ""
scrapedplot = ""
        # Add to the list
itemlist.append( Item(channel=__channel__, action="novedades", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def searchlist(item):
logger.info("[newhd.py] searchlist")
    # Download the page
data = scrapertools.cachePage(item.url)
    # Extract the entries
patronvideos = 'class="newstitle">N°(.*?)</div></td>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
try:
scrapedurl = re.compile(r'href="(.+?)"').findall(match)[0]
except:continue
try:
scrapedtitle = re.compile(r'html" >(.+?)</a>').findall(match)[0]
except:
scrapedtitle = "untitle"
try:
scrapedthumbnail = urlparse.urljoin(item.url,re.compile(r'<img src="([^"]+)" width=').findall(match)[0])
except:
scrapedthumbnail = ""
try:
scrapedplot = re.compile(r'(<div id=.+?)</div></td>').findall(match)[0]
scrapedplot = re.sub("<[^>]+>"," ",scrapedplot).strip()
except:
scrapedplot = ""
logger.info(scrapedtitle)
        # Add to the list
itemlist.append( Item(channel=__channel__, action="videos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot ,context='4' , folder=True) )
return itemlist
# Automatic channel check: this function must return "True" if the channel is OK.
def test():
from servers import servertools
# mainlist
mainlist_items = mainlist(Item())
    # The channel passes if any of the "Novedades" videos returns mirrors
espanol_items = idioma(mainlist_items[0])
novedades_items = novedades(espanol_items[0])
bien = False
for novedades_item in novedades_items:
mirrors = servertools.find_video_items( item=novedades_item )
if len(mirrors)>0:
bien = True
break
return bien | gpl-3.0 | -2,544,684,038,102,745,000 | 43.923077 | 605 | 0.635163 | false |
undefinedv/Jingubang | sqlmap/plugins/generic/misc.py | 2 | 7195 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import ntpath
import re
from lib.core.common import Backend
from lib.core.common import hashDBWrite
from lib.core.common import isStackingAvailable
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import posixToNtSlashes
from lib.core.common import readInput
from lib.core.common import singleTimeDebugMessage
from lib.core.common import unArrayizeValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.enums import DBMS
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import OS
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
class Miscellaneous:
"""
This class defines miscellaneous functionalities for plugins.
"""
def __init__(self):
pass
def getRemoteTempPath(self):
if not conf.tmpPath and Backend.isDbms(DBMS.MSSQL):
debugMsg = "identifying Microsoft SQL Server error log directory "
debugMsg += "that sqlmap will use to store temporary files with "
debugMsg += "commands' output"
logger.debug(debugMsg)
_ = unArrayizeValue(inject.getValue("SELECT SERVERPROPERTY('ErrorLogFileName')", safeCharEncode=False))
if _:
conf.tmpPath = ntpath.dirname(_)
if not conf.tmpPath:
if Backend.isOs(OS.WINDOWS):
if conf.direct:
conf.tmpPath = "%TEMP%"
else:
self.checkDbmsOs(detailed=True)
if Backend.getOsVersion() in ("2000", "NT"):
conf.tmpPath = "C:/WINNT/Temp"
elif Backend.isOs("XP"):
conf.tmpPath = "C:/Documents and Settings/All Users/Application Data/Temp"
else:
conf.tmpPath = "C:/Windows/Temp"
else:
conf.tmpPath = "/tmp"
if re.search(r"\A[\w]:[\/\\]+", conf.tmpPath, re.I):
Backend.setOs(OS.WINDOWS)
conf.tmpPath = normalizePath(conf.tmpPath)
conf.tmpPath = ntToPosixSlashes(conf.tmpPath)
singleTimeDebugMessage("going to use '%s' as temporary files directory" % conf.tmpPath)
hashDBWrite(HASHDB_KEYS.CONF_TMP_PATH, conf.tmpPath)
return conf.tmpPath
def getVersionFromBanner(self):
if "dbmsVersion" in kb.bannerFp:
return
infoMsg = "detecting back-end DBMS version from its banner"
logger.info(infoMsg)
if Backend.isDbms(DBMS.MYSQL):
first, last = 1, 6
elif Backend.isDbms(DBMS.PGSQL):
first, last = 12, 6
elif Backend.isDbms(DBMS.MSSQL):
first, last = 29, 9
else:
raise SqlmapUnsupportedFeatureException("unsupported DBMS")
query = queries[Backend.getIdentifiedDbms()].substring.query % (queries[Backend.getIdentifiedDbms()].banner.query, first, last)
if conf.direct:
query = "SELECT %s" % query
kb.bannerFp["dbmsVersion"] = unArrayizeValue(inject.getValue(query))
kb.bannerFp["dbmsVersion"] = (kb.bannerFp["dbmsVersion"] or "").replace(",", "").replace("-", "").replace(" ", "")
def delRemoteFile(self, filename):
if not filename:
return
self.checkDbmsOs()
if Backend.isOs(OS.WINDOWS):
filename = posixToNtSlashes(filename)
cmd = "del /F /Q %s" % filename
else:
cmd = "rm -f %s" % filename
self.execCmd(cmd, silent=True)
def createSupportTbl(self, tblName, tblField, tblType):
inject.goStacked("DROP TABLE %s" % tblName, silent=True)
if Backend.isDbms(DBMS.MSSQL) and tblName == self.cmdTblName:
inject.goStacked("CREATE TABLE %s(id INT PRIMARY KEY IDENTITY, %s %s)" % (tblName, tblField, tblType))
else:
inject.goStacked("CREATE TABLE %s(%s %s)" % (tblName, tblField, tblType))
def cleanup(self, onlyFileTbl=False, udfDict=None, web=False):
"""
Cleanup file system and database from sqlmap create files, tables
and functions
"""
if web and self.webBackdoorFilePath:
logger.info("cleaning up the web files uploaded")
self.delRemoteFile(self.webStagerFilePath)
self.delRemoteFile(self.webBackdoorFilePath)
if not isStackingAvailable() and not conf.direct:
return
if Backend.isOs(OS.WINDOWS):
libtype = "dynamic-link library"
elif Backend.isOs(OS.LINUX):
libtype = "shared object"
else:
libtype = "shared library"
if onlyFileTbl:
logger.debug("cleaning up the database management system")
else:
logger.info("cleaning up the database management system")
logger.debug("removing support tables")
inject.goStacked("DROP TABLE %s" % self.fileTblName, silent=True)
inject.goStacked("DROP TABLE %shex" % self.fileTblName, silent=True)
if not onlyFileTbl:
inject.goStacked("DROP TABLE %s" % self.cmdTblName, silent=True)
if Backend.isDbms(DBMS.MSSQL):
udfDict = {"master..new_xp_cmdshell": None}
if udfDict is None:
udfDict = self.sysUdfs
for udf, inpRet in udfDict.items():
message = "do you want to remove UDF '%s'? [Y/n] " % udf
output = readInput(message, default="Y")
if not output or output in ("y", "Y"):
dropStr = "DROP FUNCTION %s" % udf
if Backend.isDbms(DBMS.PGSQL):
inp = ", ".join(i for i in inpRet["input"])
dropStr += "(%s)" % inp
logger.debug("removing UDF '%s'" % udf)
inject.goStacked(dropStr, silent=True)
logger.info("database management system cleanup finished")
warnMsg = "remember that UDF %s files " % libtype
if conf.osPwn:
warnMsg += "and Metasploit related files in the temporary "
warnMsg += "folder "
warnMsg += "saved on the file system can only be deleted "
warnMsg += "manually"
logger.warn(warnMsg)
def likeOrExact(self, what):
message = "do you want sqlmap to consider provided %s(s):\n" % what
message += "[1] as LIKE %s names (default)\n" % what
message += "[2] as exact %s names" % what
choice = readInput(message, default='1')
if not choice or choice == '1':
choice = '1'
condParam = " LIKE '%%%s%%'"
elif choice == '2':
condParam = "='%s'"
else:
errMsg = "invalid value"
raise SqlmapNoneDataException(errMsg)
return choice, condParam
| gpl-3.0 | 5,569,701,877,929,334,000 | 32.938679 | 135 | 0.59458 | false |
glenn124f/treeherder | treeherder/config/settings_local.example.py | 2 | 1294 | # Switch to using a different bugzilla instance
BZ_API_URL = "https://bugzilla-dev.allizom.org"
# Applications useful for development, e.g. debug_toolbar, django_extensions.
LOCAL_APPS = ['debug_toolbar']
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/var/log/treeherder/treeherder.log',
'maxBytes': 5 * 1024 * 1024,
'backupCount': 2,
'formatter': 'standard',
},
},
'loggers': {
'django': {
'handlers': ['console', 'logfile'],
'level': 'INFO',
'propagate': True,
},
'hawkrest': {
'handlers': ['console'],
'level': 'WARNING',
},
'treeherder': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
'propagate': False,
}
}
}
| mpl-2.0 | -1,181,289,819,140,094,200 | 27.130435 | 86 | 0.473725 | false |
chairmanK/eulerian-audio-magnification | utils.py | 1 | 6313 | import numpy as np
from scipy.io import wavfile
from scipy.signal import firwin, filtfilt, hamming, resample
default_nyquist = 22050.0
def slurp_wav(path, start=0, end=(44100 * 10)):
"""Read samples from the 0th channel of a WAV file specified by
*path*."""
(fs, signal) = wavfile.read(path)
nyq = fs / 2.0
# For expediency, just pull one channel
if signal.ndim > 1:
signal = signal[:, 0]
signal = signal[start:end]
return (nyq, signal)
def _num_windows(length, window, step):
return max(0, int((length - window + step) / step))
def window_slice_iterator(length, window, step):
"""Generate slices into a 1-dimensional array of specified *length*
with the specified *window* size and *step* size.
Yields slice objects of length *window*. Any remainder at the end is
unceremoniously truncated.
"""
num_windows = _num_windows(length, window, step)
for i in xrange(num_windows):
start = step * i
end = start + window
yield slice(start, end)
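# For example (illustrative values): list(window_slice_iterator(10, window=4, step=2))
# yields slice(0, 4), slice(2, 6), slice(4, 8) and slice(6, 10).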
def stft(signal, window=1024, step=None, n=None):
"""Compute the short-time Fourier transform on a 1-dimensional array
*signal*, with the specified *window* size, *step* size, and
*n*-resolution FFT.
This function returns a 2-dimensional array of complex floats. The
0th dimension is time (window steps) and the 1th dimension is
frequency.
"""
if step is None:
step = window / 2
if n is None:
n = window
if signal.ndim != 1:
raise ValueError("signal must be a 1-dimensional array")
length = signal.size
num_windows = _num_windows(length, window, step)
out = np.zeros((num_windows, n), dtype=np.complex64)
taper = hamming(window)
for (i, s) in enumerate(window_slice_iterator(length, window, step)):
out[i, :] = np.fft.fft(signal[s] * taper, n)
return out
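# Minimal usage sketch ('example.wav' is a placeholder file name):
#
#     (nyq, signal) = slurp_wav('example.wav')
#     spec = stft(signal, window=1024, step=512)
#     # spec has shape (num_windows, 1024); row i is the tapered FFT of window i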
def stft_laplacian_pyramid(spectrogram, levels=None):
"""For each window of the spectrogram, construct laplacian pyramid
on the real and imaginary components of the FFT.
"""
(num_windows, num_freqs) = spectrogram.shape
if levels is None:
levels = int(np.log2(num_freqs))
# (num_windows, num_frequencies, levels)
pyr = np.zeros(spectrogram.shape + (levels,), dtype=np.complex)
for i in xrange(num_windows):
real_pyr = list(laplacian_pyramid(np.real(spectrogram[i, :]), levels=levels))
imag_pyr = list(laplacian_pyramid(np.imag(spectrogram[i, :]), levels=levels))
for j in xrange(levels):
pyr[i, :, j] = real_pyr[j] + 1.0j * imag_pyr[j]
return pyr
def laplacian_pyramid(arr, levels=None):
if arr.ndim != 1:
raise ValueError("arr must be 1-dimensional")
if levels is None:
levels = int(np.log2(arr.size))
tap = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) / 16.0
tap_fft = np.fft.fft(tap, arr.size)
for i in xrange(levels):
smoothed = np.real(np.fft.ifft(np.fft.fft(arr) * tap_fft))
band = arr - smoothed
yield band
arr = smoothed
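# Usage sketch: each yielded band has the same length as the input; the final
# low-pass residual is kept internally rather than yielded, so the sum of the
# bands equals the input minus that residual. Here `some_1d_array` stands for
# any 1-dimensional float array.
#
#     bands = list(laplacian_pyramid(some_1d_array, levels=4))
#     # len(bands) == 4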
def amplify_pyramid(pyr, passband, fs, gain=5.0):
tap = firwin(100, passband, nyq=(fs / 2.0), pass_zero=False)
(_, num_freqs, levels) = pyr.shape
amplified_pyr = np.copy(pyr)
for i in xrange(num_freqs):
for j in xrange(levels):
amplitude = gain * filtfilt(tap, [1.0], np.abs(pyr[:, i, j]))
theta = np.angle(pyr[:, i, j])
amplified_pyr[:, i, j] += amplitude * np.exp(1.0j * theta)
return amplified_pyr
def resynthesize(spectrogram, window=1024, step=None, n=None):
"""Compute the short-time Fourier transform on a 1-dimensional array
*signal*, with the specified *window* size, *step* size, and
*n*-resolution FFT.
This function returns a 2-dimensional array of complex floats. The
0th dimension is time (window steps) and the 1th dimension is
frequency.
"""
if step is None:
step = window / 2
if n is None:
n = window
if spectrogram.ndim != 2:
raise ValueError("spectrogram must be a 2-dimensional array")
(num_windows, num_freqs) = spectrogram.shape
length = step * (num_windows - 1) + window
signal = np.zeros((length,))
for i in xrange(num_windows):
snippet = np.real(np.fft.ifft(spectrogram[i, :], window))
signal[(step * i):(step * i + window)] += snippet
signal = signal[window:]
ceiling = np.max(np.abs(signal))
signal = signal / ceiling * 0.9 * 0x8000
signal = signal.astype(np.int16)
return signal
def amplify_modulation(spectrogram, fs, passband=[1.0, 10.0], gain=0.0):
(num_windows, num_freqs) = spectrogram.shape
envelope = np.abs(spectrogram)
amplification = np.ones(envelope.shape)
if gain > 0.0:
taps = firwin(200, passband, nyq=(fs / 2.0), pass_zero=False)
for i in xrange(num_freqs):
#amplification[:, i] = envelope[:, i] + gain * filtfilt(
# taps, [1.0], envelope[:, i])
amplification[:, i] = gain * filtfilt(
taps, [1.0], envelope[:, i])
amplification = np.maximum(0.0, amplification)
amplified_spectrogram = spectrogram * amplification
return amplified_spectrogram
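# End-to-end sketch of the analysis/resynthesis pipeline (file names, band and
# gain values are illustrative only):
#
#     (nyq, signal) = slurp_wav('example.wav')
#     spec = stft(signal, window=1024, step=512)
#     frame_rate = 2.0 * nyq / 512   # spectrogram frames per second
#     boosted = amplify_modulation(spec, frame_rate, passband=[1.0, 10.0], gain=5.0)
#     wavfile.write('amplified.wav', int(2 * nyq), resynthesize(boosted, window=1024, step=512))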
def svd_truncation(spectrogram, k=[0]):
"""Compute SVD of the spectrogram, trunate to *k* components,
reconstitute a new spectrogram."""
# SVD of the spectrogram:
# u.shape == (num_windows, k)
# s.shape == (k, k)
# v.shape == (k, n)
# where
# k == min(num_windows, n)
(left, sv, right) = np.linalg.svd(spectrogram, full_matrices=False)
zero_out = np.array([i for i in xrange(sv.size) if i not in k])
if zero_out.size:
sv[zero_out] = 0.0
truncated = np.dot(left, sv[:, np.newaxis] * right)
return truncated
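# Usage sketch: keep only the two strongest singular components of a
# spectrogram `spec` (the component indices are illustrative):
#
#     cleaned = svd_truncation(spec, k=[0, 1])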
def total_power(spectrogram):
return np.power(np.abs(spectrogram), 2).sum()
def normalize_total_power(spectrogram, total):
unit_power = spectrogram / np.sqrt(total_power(spectrogram))
return unit_power * np.sqrt(total)
def estimate_spectral_power(spectrogram):
"""Given a spectrogram, compute power for each frequency band."""
# compute mean power at each frequency
power = np.power(np.abs(spectrogram), 2).mean(axis=0)
return power
| mit | 1,777,794,232,154,999,300 | 35.074286 | 85 | 0.631237 | false |
ahvigil/MultiQC | multiqc/modules/custom_content/custom_content.py | 1 | 16940 | #!/usr/bin/env python
""" Core MultiQC module to parse output from custom script output """
from __future__ import print_function
from collections import defaultdict, OrderedDict
import logging
import json
import os
import yaml
from multiqc import config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import table, bargraph, linegraph, scatter, heatmap, beeswarm
# Initialise the logger
log = logging.getLogger(__name__)
def custom_module_classes():
"""
MultiQC Custom Content class. This module does a lot of different
things depending on the input and is as flexible as possible.
NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES
"""
# Dict to hold parsed data. Each key should contain a custom data type
# eg. output from a particular script. Note that this script may pick
# up many different types of data from many different sources.
# Second level keys should be 'config' and 'data'. Data key should then
# contain sample names, and finally data.
cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict()))
# Dictionary to hold search patterns - start with those defined in the config
search_patterns = OrderedDict()
search_patterns['core_sp'] = config.sp['custom_content']
# First - find files using patterns described in the config
config_data = getattr(config, 'custom_data', {})
for k,f in config_data.items():
# Check that we have a dictionary
if type(f) != dict:
log.debug("config.custom_data row was not a dictionary: {}".format(k))
continue
c_id = f.get('id', k)
# Data supplied in with config (eg. from a multiqc_config.yaml file in working directory)
if 'data' in f:
cust_mods[c_id]['data'].update( f['data'] )
            cust_mods[c_id]['config'].update( { k:v for k, v in f.items() if k != 'data' } )
continue
# File name patterns supplied in config
if 'sp' in f:
cust_mods[c_id]['config'] = f
search_patterns[c_id] = f['sp']
else:
log.debug("Search pattern not found for custom module: {}".format(c_id))
# Now go through each of the file search patterns
bm = BaseMultiqcModule()
for k, sp in search_patterns.items():
for f in bm.find_log_files(sp):
f_extension = os.path.splitext(f['fn'])[1]
# YAML and JSON files are the easiest
parsed_data = None
if f_extension == '.yaml' or f_extension == '.yml':
try:
# Parsing as OrderedDict is slightly messier with YAML
# http://stackoverflow.com/a/21048064/713980
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor)
parsed_data = yaml.load(f['f'])
except Exception as e:
log.warning("Error parsing YAML file '{}' (probably invalid YAML)".format(f['fn']))
log.warning("YAML error: {}".format(e))
break
elif f_extension == '.json':
try:
# Use OrderedDict for objects so that column order is honoured
parsed_data = json.loads(f['f'], object_pairs_hook=OrderedDict)
except Exception as e:
log.warning("Error parsing JSON file '{}' (probably invalid JSON)".format(f['fn']))
log.warning("JSON error: {}".format(e))
break
if parsed_data is not None:
c_id = parsed_data.get('id', k)
if len(parsed_data.get('data', {})) > 0:
cust_mods[c_id]['data'].update( parsed_data['data'] )
cust_mods[c_id]['config'].update ( { j:k for j,k in parsed_data.items() if j != 'data' } )
else:
log.warning("No data found in {}".format(f['fn']))
# txt, csv, tsv etc
else:
# Look for configuration details in the header
m_config = _find_file_header( f )
s_name = None
if m_config is not None:
c_id = m_config.get('id', k)
cust_mods[c_id]['config'].update( m_config )
m_config = cust_mods[c_id]['config']
s_name = m_config.get('sample_name')
else:
c_id = k
m_config = dict()
# Guess sample name if not given
if s_name is None:
s_name = bm.clean_s_name(f['s_name'], f['root'])
# Guess c_id if no information known
if k == 'core_sp':
c_id = s_name
# Add information about the file to the config dict
if 'files' not in m_config:
m_config['files'] = dict()
m_config['files'].update( { s_name : { 'fn': f['fn'], 'root': f['root'] } } )
# Guess file format if not given
if m_config.get('file_format') is None:
m_config['file_format'] = _guess_file_format( f )
# Parse data
try:
parsed_data, conf = _parse_txt( f, m_config )
if parsed_data is None or len(parsed_data) == 0:
log.warning("Not able to parse custom data in {}".format(f['fn']))
else:
# Did we get a new section id from the file?
if conf.get('id') is not None:
c_id = conf.get('id')
# heatmap - special data type
if type(parsed_data) == list:
cust_mods[c_id]['data'] = parsed_data
else:
cust_mods[c_id]['data'].update(parsed_data)
cust_mods[c_id]['config'].update(conf)
except (IndexError, AttributeError, TypeError):
log.error("Unexpected parsing error for {}".format(f['fn']), exc_info=True)
raise # testing
# Remove any configs that have no data
remove_cids = [ k for k in cust_mods if len(cust_mods[k]['data']) == 0 ]
for k in remove_cids:
del cust_mods[k]
if len(cust_mods) == 0:
log.debug("No custom content found")
raise UserWarning
# Go through each data type
parsed_modules = list()
for k, mod in cust_mods.items():
# General Stats
if mod['config'].get('plot_type') == 'generalstats':
gsheaders = mod['config'].get('pconfig')
if gsheaders is None:
headers = set()
for d in mod['data'].values():
headers.update(d.keys())
headers = list(headers)
headers.sort()
gsheaders = OrderedDict()
for k in headers:
gsheaders[k] = dict()
# Headers is a list of dicts
if type(gsheaders) == list:
hs = OrderedDict()
for h in gsheaders:
for k, v in h.items():
hs[k] = v
gsheaders = hs
# Add namespace if not specified
for k in gsheaders:
if 'namespace' not in gsheaders[k]:
gsheaders[k]['namespace'] = c_id
bm.general_stats_addcols(mod['data'], gsheaders)
# Initialise this new module class and append to list
else:
parsed_modules.append( MultiqcModule(k, mod) )
log.info("{}: Found {} samples ({})".format(k, len(mod['data']), mod['config'].get('plot_type')))
return parsed_modules
class MultiqcModule(BaseMultiqcModule):
""" Module class, used for each custom content type """
def __init__(self, c_id, mod):
modname = mod['config'].get('section_name', c_id.replace('_', ' ').title())
# Initialise the parent object
super(MultiqcModule, self).__init__(
name = modname,
anchor = mod['config'].get('section_anchor', c_id),
href = mod['config'].get('section_href'),
info = mod['config'].get('description')
)
pconfig = mod['config'].get('pconfig', {})
if pconfig.get('title') is None:
pconfig['title'] = modname
# Table
if mod['config'].get('plot_type') == 'table':
pconfig['sortRows'] = pconfig.get('sortRows', False)
headers = mod['config'].get('headers')
self.intro += table.plot(mod['data'], headers, pconfig)
# Bar plot
elif mod['config'].get('plot_type') == 'bargraph':
self.intro += bargraph.plot(mod['data'], mod['config'].get('categories'), pconfig)
# Line plot
elif mod['config'].get('plot_type') == 'linegraph':
self.intro += linegraph.plot(mod['data'], pconfig)
# Scatter plot
elif mod['config'].get('plot_type') == 'scatter':
self.intro += scatter.plot(mod['data'], pconfig)
# Heatmap
elif mod['config'].get('plot_type') == 'heatmap':
self.intro += heatmap.plot(mod['data'], mod['config'].get('xcats'), mod['config'].get('ycats'), pconfig)
# Beeswarm plot
elif mod['config'].get('plot_type') == 'beeswarm':
self.intro += beeswarm.plot(mod['data'], pconfig)
# Not supplied
elif mod['config'].get('plot_type') == None:
log.warning("Plot type not found for content ID '{}'".format(c_id))
# Not recognised
else:
log.warning("Error - custom content plot type '{}' not recognised for content ID {}".format(mod['config'].get('plot_type'), c_id))
def _find_file_header(f):
# Collect commented out header lines
hlines = []
for l in f['f'].splitlines():
if l.startswith('#'):
hlines.append(l[1:])
hconfig = None
try:
hconfig = yaml.load("\n".join(hlines))
except yaml.YAMLError:
log.debug("Could not parse comment file header for MultiQC custom content: {}".format(f['fn']))
return hconfig
def _guess_file_format(f):
"""
Tries to guess file format, first based on file extension (csv / tsv),
then by looking for common column separators in the first 10 non-commented lines.
Splits by tab / comma / space and counts resulting number of columns. Finds the most
common column count, then comparsed how many lines had this number.
eg. if tab, all 10 lines should have x columns when split by tab.
Returns: csv | tsv | spaces (spaces by default if all else fails)
"""
filename, file_extension = os.path.splitext(f['fn'])
tabs = []
commas = []
spaces = []
j = 0
for l in f['f'].splitlines():
if not l.startswith('#'):
j += 1
tabs.append(len(l.split("\t")))
commas.append(len(l.split(",")))
spaces.append(len(l.split()))
if j == 10:
break
tab_mode = max(set(tabs), key=tabs.count)
commas_mode = max(set(commas), key=commas.count)
spaces_mode = max(set(spaces), key=spaces.count)
tab_lc = tabs.count(tab_mode) if tab_mode > 1 else 0
commas_lc = commas.count(commas_mode) if commas_mode > 1 else 0
spaces_lc = spaces.count(spaces_mode) if spaces_mode > 1 else 0
if tab_lc == j:
return 'tsv'
elif commas_lc == j:
return 'csv'
else:
if tab_lc > commas_lc and tab_lc > spaces_lc:
return 'tsv'
elif commas_lc > tab_lc and commas_lc > spaces_lc:
return 'csv'
elif spaces_lc > tab_lc and spaces_lc > commas_lc:
return 'spaces'
else:
if tab_mode == commas_lc and tab_mode > spaces_lc:
if tab_mode > commas_mode:
return 'tsv'
else:
return 'csv'
return 'spaces'
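# Illustrative behaviour (hypothetical file contents): if the first ten
# non-commented lines all split into the same number of columns on tabs,
# e.g. "sample_1<TAB>12.1<TAB>14.0", the function returns 'tsv'; the same
# rows separated by commas give 'csv', and whitespace-separated rows give
# 'spaces', which is also the fallback when nothing matches consistently.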
def _parse_txt(f, conf):
# Split the data into a list of lists by column
sep = None
if conf['file_format'] == 'csv':
sep = ","
if conf['file_format'] == 'tsv':
sep = "\t"
lines = f['f'].splitlines()
d = []
ncols = None
for l in lines:
if not l.startswith('#'):
sections = l.split(sep)
d.append(sections)
if ncols is None:
ncols = len(sections)
elif ncols != len(sections):
log.warn("Inconsistent number of columns found in {}! Skipping..".format(f['fn']))
return (None, conf)
# Convert values to floats if we can
first_row_str = 0
for i,l in enumerate(d):
for j, v in enumerate(l):
try:
d[i][j] = float(v)
except ValueError:
if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")):
v = v[1:-1]
d[i][j] = v
# Count strings in first row (header?)
if i == 0:
first_row_str += 1
    all_numeric = all([ type(l) == float for i in range(1, len(d)) for l in d[i][1:] ])
# Heatmap: Number of headers == number of lines
if conf.get('plot_type') is None and first_row_str == len(lines) and all_numeric:
conf['plot_type'] = 'heatmap'
if conf.get('plot_type') == 'heatmap':
conf['xcats'] = d[0][1:]
conf['ycats'] = [s[0] for s in d[1:]]
data = [s[1:] for s in d[1:]]
return (data, conf)
# Header row of strings, or configured as table
if first_row_str == len(d[0]) or conf.get('plot_type') == 'table':
data = OrderedDict()
for s in d[1:]:
data[s[0]] = OrderedDict()
for i, v in enumerate(s[1:]):
cat = str(d[0][i+1])
data[s[0]][cat] = v
# Bar graph or table - if numeric data, go for bar graph
if conf.get('plot_type') is None:
allfloats = True
for r in d:
for v in r[1:]:
allfloats = allfloats and type(v) == float
if allfloats:
conf['plot_type'] = 'bargraph'
else:
conf['plot_type'] = 'table'
if conf.get('plot_type') == 'bargraph' or conf.get('plot_type') == 'table':
return (data, conf)
else:
data = OrderedDict() # reset
# Scatter plot: First row is str : num : num
if (conf.get('plot_type') is None and len(d[0]) == 3 and
type(d[0][0]) != float and type(d[0][1]) == float and type(d[0][2]) == float):
conf['plot_type'] = 'scatter'
if conf.get('plot_type') == 'scatter':
data = dict()
for s in d:
try:
data[s[0]] = {
'x': float(s[1]),
'y': float(s[2])
}
except (IndexError, ValueError):
pass
return (data, conf)
# Single sample line / bar graph - first row has two columns
if len(d[0]) == 2:
# Line graph - row, num : num
if (conf.get('plot_type') is None and type(d[0][0]) == float and type(d[0][1]) == float):
conf['plot_type'] = 'linegraph'
# Bar graph - str : num
if (conf.get('plot_type') is None and type(d[0][0]) != float and type(d[0][1]) == float):
conf['plot_type'] = 'bargraph'
# Data structure is the same
if (conf.get('plot_type') == 'linegraph' or conf.get('plot_type') == 'bargraph'):
# Set section id based on directory if not known
if conf.get('id') is None:
conf['id'] = os.path.basename(f['root'])
data = OrderedDict()
for s in d:
data[s[0]] = s[1]
return ( { f['s_name']: data }, conf )
# Multi-sample line graph: No header row, str : lots of num columns
if conf.get('plot_type') is None and len(d[0]) > 4 and all_numeric:
conf['plot_type'] = 'linegraph'
if conf.get('plot_type') == 'linegraph':
data = dict()
# Use 1..n range for x values
for s in d:
data[s[0]] = dict()
for i,v in enumerate(s[1:]):
j = i+1
data[s[0]][i+1] = v
return (data, conf)
# Got to the end and haven't returned. It's a mystery, capn'!
log.debug("Not able to figure out a plot type for '{}' ".format(f['fn']) +
"plot type = {}, all numeric = {}, first row str = {}".format( conf.get('plot_type'), all_numeric, first_row_str ))
return (None, conf)
| gpl-3.0 | -8,655,283,556,172,694,000 | 38.032258 | 142 | 0.520838 | false |
victorywang80/Maintenance | saltstack/src/salt/states/ntp.py | 1 | 2252 | # -*- coding: utf-8 -*-
'''
Management of NTP servers
=========================
.. versionadded:: Hydrogen
This state is used to manage NTP servers. Currently only Windows is supported.
.. code-block:: yaml
win_ntp:
ntp.managed:
- servers:
- pool.ntp.org
- us.pool.ntp.org
'''
# Import python libs
import logging
# Import salt libs
from salt._compat import string_types
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
This only supports Windows
'''
if not salt.utils.is_windows():
return False
return 'ntp'
def _check_servers(servers):
if not isinstance(servers, list):
return False
for server in servers:
if not isinstance(server, string_types):
return False
return True
def _get_servers():
try:
return set(__salt__['ntp.get_servers']())
except TypeError:
return set([False])
def managed(name, servers=None):
'''
Manage NTP servers
servers
A list of NTP servers
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'NTP servers already configured as specified'}
if not _check_servers(servers):
ret['result'] = False
ret['comment'] = 'NTP servers must be a list of strings'
before_servers = _get_servers()
desired_servers = set(servers)
if before_servers == desired_servers:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('NTP servers will be updated to: {0}'
.format(', '.join(sorted(desired_servers))))
return ret
__salt__['ntp.set_servers'](*desired_servers)
after_servers = _get_servers()
if after_servers == desired_servers:
ret['comment'] = 'NTP servers updated'
ret['changes'] = {'old': sorted(before_servers),
'new': sorted(after_servers)}
else:
ret['result'] = False
ret['comment'] = 'Failed to update NTP servers'
if before_servers != after_servers:
ret['changes'] = {'old': sorted(before_servers),
'new': sorted(after_servers)}
return ret
| apache-2.0 | -8,279,129,994,031,068,000 | 22.216495 | 78 | 0.561279 | false |
fredex42/gnmvidispine | gnmvidispine/vs_acl.py | 1 | 7021 | from .vidispine_api import VSApi
from pprint import pprint
import xml.etree.ElementTree as ET
class VSAccess(VSApi):
"""
Represents an individual entry in an Access Control List (ACL)
"""
def __init__(self,*args,**kwargs):
super(VSAccess,self).__init__(*args,**kwargs)
self.parent = None
self.name = "INVALIDNAME"
ns = "{http://xml.vidispine.com/schema/vidispine}"
self.dataContent = ET.Element('{0}access'.format(ns))
def populate(self, item, accessid):
"""
Populate from a Vidispine object
:param item: VSItem or other VS-type object to query
:param accessid: Type of access to query
:return: None
"""
self.parent = item
self.name = accessid
url = "/{0}/{1}/access/{2}".format(item.type,item.name,accessid)
self.dataContent = self.request(url)
def populateFromNode(self,node):
self.dataContent = node
ns = "{http://xml.vidispine.com/schema/vidispine}"
self.name = node.attrib['id']
def nodeContent(self,node,path):
"""
Internal method to get the content of a node
:param node:
:param path:
:return:
"""
ns = "{http://xml.vidispine.com/schema/vidispine}"
try:
return node.find('{0}{1}'.format(ns,path)).text
except Exception:
return None
def setNode(self,node,path,value):
ns = "{http://xml.vidispine.com/schema/vidispine}"
try:
node.find('{0}{1}'.format(ns,path)).text = value
except Exception:
n = ET.Element(path)
n.text = value
node.append(n)
@property
def grantor(self):
"""
:return: Name of the grantor
"""
#pprint(self.dataContent)
return self.nodeContent(self.dataContent,'grantor')
@property
def recursive(self):
"""
:return: Is this recursively inherited
"""
return self.nodeContent(self.dataContent,'recursive')
@recursive.setter
def recursive(self,value):
"""
Set whether this access is recursive
:param value:
:return:
"""
        strvalue="false"
        if value:
            strvalue="true"
        self.setNode(self.dataContent,'recursive',strvalue)
@property
def permission(self):
return self.nodeContent(self.dataContent,'permission')
@permission.setter
def permission(self,value):
if value != 'NONE' and value != 'READ' and value !='WRITE' and value != 'ALL':
raise ValueError
self.setNode(self.dataContent,'permission',value)
@property
def affectedUser(self):
return self.nodeContent(self.dataContent,'user')
@affectedUser.setter
def affectedUser(self,value):
self.setNode(self.dataContent,'user',value)
@property
def group(self):
return self.nodeContent(self.dataContent,'group')
@group.setter
def group(self,value):
self.setNode(self.dataContent,'group',value)
def __eq__(self, other):
if not isinstance(other,VSAccess):
raise TypeError
        if (other.group==self.group and other.user==self.user
                and other.permission==self.permission
                and other.affectedUser==self.affectedUser
                and other.recursive==self.recursive
                and other.grantor==self.grantor):
return True
return False
def asXML(self):
"""
Returns a Vidispine XML representing the contents of this entry
:return: String of Vidispine XML
"""
ns = "{http://xml.vidispine.com/schema/vidispine}"
docNode = ET.Element('{0}AccessControlDocument'.format(ns))
for n in self.dataContent:
docNode.append(n)
return ET.tostring(docNode,encoding="UTF-8")
class VSAcl(VSApi):
"""
Represents an Access Control List (ACL) for a Vidispine object
"""
def __init__(self,*args,**kwargs):
super(VSAcl,self).__init__(*args,**kwargs)
self.parent = None
self.name = "INVALIDNAME"
self._entries = []
def populate(self, item):
"""
Retrieve data from a Vidispine entity and load it into this object
:param item: VSItem or similar VS-object to retrieve access control information for
:return: None
"""
self.parent = item
self._entries = []
url = "/{0}/{1}/access".format(item.type,item.name)
dataContent = self.request(url)
ns = "{http://xml.vidispine.com/schema/vidispine}"
for node in dataContent.findall('{0}access'.format(ns)):
a = VSAccess(self.host,self.port,self.user,self.passwd)
a.populateFromNode(node)
self._entries.append(a)
def populateFromString(self,xmldata):
"""
Reads ACL data in from a string
:param xmldata: String representing XML of ACL data
:return: None
"""
self.parent = None
self._entries = []
dataContent = ET.fromstring(xmldata)
ns = "{http://xml.vidispine.com/schema/vidispine}"
for node in dataContent.findall('{0}access'.format(ns)):
a = VSAccess(self.host,self.port,self.user,self.passwd)
a.populateFromNode(node)
self._entries.append(a)
def entries(self):
"""
Generator to yield each entry of the access control list as a VSAccess object
:return:
"""
for e in self._entries:
yield e
def add(self,entry):
"""
Add a VSAccess entry to the list
:param entry: VSAccess to add
:return: None
"""
url = "/{0}/{1}/access".format(self.parent.type,self.parent.name)
self.request(url,method="POST",body=entry.asXML())
self.populate(self.parent)
def removeByRef(self, entry):
"""
Remove the entry from the ACL
:param entry: VSAccess entry to remove from the list
:return:
"""
if entry.permission =="OWNER":
return #can't remove owner permission
url = "/{0}/{1}/access/{2}".format(self.parent.type,self.parent.name,entry.name)
self.request(url,method="DELETE")
self.populate(self.parent)
def filter(self,grantor=None,recursive=None,permission=None,affectedUser=None,group=None):
#rtn = []
for e in self._entries:
should_take = True
if grantor is not None and e.grantor!=grantor:
should_take=False
if recursive is not None and e.recursive!=recursive:
should_take=False
if permission is not None and e.permission!=permission:
should_take=False
if affectedUser is not None and e.affectedUser!=affectedUser:
should_take=False
if group is not None and e.group!=group:
should_take=False
if should_take:
yield e
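# --- Added usage sketch (illustration only): assumes a reachable Vidispine
# server and an existing VSItem-like object, neither of which is provided by
# this module; the host, credentials and user names below are placeholders.
#
#   acl = VSAcl("vidispine.example.com", 8080, "admin", "secret")
#   acl.populate(item)                    # item is a VSItem or similar object
#   for entry in acl.filter(permission="READ"):
#       print(entry.grantor, entry.affectedUser)
#
#   entry = VSAccess("vidispine.example.com", 8080, "admin", "secret")
#   entry.permission = "WRITE"
#   entry.affectedUser = "someuser"
#   entry.recursive = True
#   acl.add(entry)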
| gpl-2.0 | -4,825,686,206,120,529,000 | 30.34375 | 209 | 0.586669 | false |
Digirolamo/pythonicqt | pythonicqt/examples/examplebase.py | 1 | 1908 | """File contains base class/metaclass for examples.
Auto collects imported examples so the example editor can easily find them."""
import sys
from collections import OrderedDict
import six
from pythonicqt.Qt import QtCore, QtGui
class MetaExample(type(QtCore.Qt)):
    """Stores subclasses in an OrderedDict for easy collection.
    all_examples starts as None, but becomes an OrderedDict
mapping module to example"""
all_examples = None
def __init__(cls, name, bases, dct):
#Create the list and don't add the base for the first base example.
if MetaExample.all_examples is None:
MetaExample.all_examples = OrderedDict()
else:
if cls.__module__ in MetaExample.all_examples:
first_name = MetaExample.all_examples[cls.__module__].__name__
second_name = cls.__name__
raise TypeError("Only one item can inherit from ExampleBase per module."
"Issue with {} and {} in {}."
"".format(first_name, second_name, cls.__module__))
MetaExample.all_examples[cls.__module__] = cls
type(QtCore.Qt).__init__(cls, name, bases, dct)
@six.add_metaclass(MetaExample)
class ExampleBase(QtGui.QGroupBox):
"""Having a base example class allows us to extend examples in the future.
Only one class can inherit from this class in each module."""
#override these class attributes.
title = "Untitled"
def __init__(self, *args, **kwargs):
super(ExampleBase, self).__init__(title=self.title, *args, **kwargs)
@classmethod
def run_example(cls):
"""This method should be called when you want a standalone
example to start a QApplication and run itself."""
app = QtGui.QApplication(sys.argv)
example_widget = cls()
example_widget.show()
sys.exit(app.exec_())
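# --- Added minimal subclass sketch ('MyExample' and its contents are
# illustrative assumptions, not part of the package):
#
#   class MyExample(ExampleBase):
#       title = "My Example"
#
#       def __init__(self, *args, **kwargs):
#           super(MyExample, self).__init__(*args, **kwargs)
#           layout = QtGui.QVBoxLayout(self)
#           layout.addWidget(QtGui.QLabel("Hello from MyExample"))
#
# Importing the module that defines MyExample registers it in
# MetaExample.all_examples (one subclass per module), and
# MyExample.run_example() starts it as a standalone application.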
| mit | 1,780,299,716,725,449,000 | 39.595745 | 88 | 0.63522 | false |
Lemma1/MAC-POSTS | doc_builder/sphinx-contrib/mockautodoc/tests/test_functional.py | 2 | 2182 | # -*- coding: utf-8 -*-
import os
import shutil
import tempfile
from sphinx.application import Sphinx
_fixturedir = os.path.join(os.path.dirname(__file__), 'fixture')
_fakecmd = os.path.join(os.path.dirname(__file__), 'fakecmd.py')
_tempdir = _srcdir = _outdir = None
def setup():
global _tempdir, _srcdir, _outdir
_tempdir = tempfile.mkdtemp()
_srcdir = os.path.join(_tempdir, 'src')
_outdir = os.path.join(_tempdir, 'out')
os.mkdir(_srcdir)
def teardown():
shutil.rmtree(_tempdir)
def readfile(fname):
with open(os.path.join(_outdir, fname), 'r') as f:
return f.read()
def runsphinx(text, builder, confoverrides, config=None):
f = open(os.path.join(_srcdir, 'index.rst'), 'w')
try:
f.write(text)
finally:
f.close()
app = Sphinx(_srcdir, _fixturedir, _outdir, _outdir, builder,
confoverrides)
if config:
app.config.update(config)
app.build()
def with_runsphinx(builder, confoverrides=None):
def wrapfunc(func):
def test():
src = '\n'.join(l[4:] for l in func.__doc__.splitlines()[2:])
os.mkdir(_outdir)
try:
runsphinx(src, builder, confoverrides)
func()
finally:
os.unlink(os.path.join(_srcdir, 'index.rst'))
shutil.rmtree(_outdir)
test.__name__ = func.__name__
return test
return wrapfunc
@with_runsphinx('html')
def test_buildhtml_simple():
"""Generate simple HTML
.. automodule:: package.module
:members:
:mockimport: not_existed, gevent, twisted, sqlalchemy
:mocktype: BaseModuleClass
.. automodule:: package
:members:
:mockimport: not_existed
:mocktype: BaseModuleClass
"""
content = readfile('index.html')
assert 'Some func docs' in content
assert 'somefunc' in content
assert 'Some class docs' in content
assert 'SomeModuleClass' in content
assert 'Some package/func docs' in content
assert 'package_somefunc' in content
assert 'Some package/class docs' in content
assert 'PackageSomeModuleClass' in content
| mit | 9,096,477,055,609,182,000 | 24.97619 | 73 | 0.607699 | false |
happyleavesaoc/home-assistant | homeassistant/components/device_tracker/mqtt.py | 16 | 1390 | """
Support for tracking MQTT enabled devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.mqtt/
"""
import asyncio
import logging
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
from homeassistant.core import callback
from homeassistant.const import CONF_DEVICES
from homeassistant.components.mqtt import CONF_QOS
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['mqtt']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(mqtt.SCHEMA_BASE).extend({
vol.Required(CONF_DEVICES): {cv.string: mqtt.valid_subscribe_topic},
})
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up the MQTT tracker."""
devices = config[CONF_DEVICES]
qos = config[CONF_QOS]
dev_id_lookup = {}
@callback
def async_tracker_message_received(topic, payload, qos):
"""Handle received MQTT message."""
hass.async_add_job(
async_see(dev_id=dev_id_lookup[topic], location_name=payload))
for dev_id, topic in devices.items():
dev_id_lookup[topic] = dev_id
        yield from mqtt.async_subscribe(
            hass, topic, async_tracker_message_received, qos)
return True
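# --- Added illustrative configuration sketch (device names and topics are
# assumptions, not shipped defaults):
#
#   device_tracker:
#     - platform: mqtt
#       qos: 1
#       devices:
#         paulus_phone: location/paulus
#         anne_phone: location/anne
#
# A payload published to e.g. location/paulus (such as 'home' or 'not_home')
# is forwarded to async_see() as that device's location_name.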
| apache-2.0 | 5,199,534,800,505,077,000 | 28.574468 | 74 | 0.726619 | false |
DatapuntAmsterdam/handelsregister | web/handelsregister/datasets/hr/tests/test_import_prs.py | 1 | 3945 | import datetime
from decimal import Decimal
from django.test import TestCase
from datasets import build_hr_data
from datasets.kvkdump import models as kvk
from datasets.kvkdump import utils
from .. import models
class ImportPersoonTest(TestCase):
def setUp(self):
utils.generate_schema()
def _convert(self, p: kvk.KvkPersoon) -> models.Persoon:
build_hr_data.fill_stelselpedia()
return models.Persoon.objects.get(pk=p.pk)
def test_import_niet_natuurlijkpersoon(self):
p = kvk.KvkPersoon.objects.create(
prsid=Decimal('100000000000000000'),
prshibver=Decimal('100000000000000000'),
datumuitschrijving=None,
datumuitspraak=None,
duur=None,
faillissement='Nee',
geboortedatum=None,
geboorteland=None,
geboorteplaats=None,
geemigreerd=None,
geheim=None,
geslachtsaanduiding=None,
geslachtsnaam=None,
handlichting=None,
huwelijksdatum=None,
naam='Testpersoon B.V.',
nummer=None,
ookgenoemd=None,
persoonsrechtsvorm='BeslotenVennootschap',
redeninsolvatie=None,
rsin='000000001',
soort=None,
status=None,
toegangscode=None,
typering='rechtspersoon',
uitgebreiderechtsvorm='BeslotenVennootschap',
verkortenaam='Testpersoon Verkort B.V.',
volledigenaam='Testpersoon Volledig B.V.',
voornamen=None,
voorvoegselgeslachtsnaam=None,
rechtsvorm=None,
doelrechtsvorm=None,
rol='EIGENAAR'
)
persoon = self._convert(p)
self.assertIsNotNone(persoon)
self.assertEqual(Decimal('100000000000000000'), persoon.id)
self.assertEqual(False, persoon.faillissement)
self.assertEqual('BeslotenVennootschap', persoon.rechtsvorm)
self.assertEqual('000000001', persoon.niet_natuurlijkpersoon.rsin)
self.assertEqual('rechtspersoon', persoon.typering)
self.assertEqual('BeslotenVennootschap', persoon.uitgebreide_rechtsvorm)
self.assertEqual('Testpersoon Verkort B.V.', persoon.niet_natuurlijkpersoon.verkorte_naam)
self.assertEqual('Testpersoon Volledig B.V.', persoon.volledige_naam)
self.assertEqual('EIGENAAR', persoon.rol)
def test_import_natuurlijkpersoon(self):
p = kvk.KvkPersoon.objects.create(
prsid=Decimal('200000000000000000'),
prshibver=Decimal('200000000000000000'),
datumuitschrijving=None,
datumuitspraak=None,
duur=None,
faillissement='Nee',
geboortedatum=None,
geboorteland=None,
geboorteplaats=None,
geemigreerd=None,
geheim=None,
geslachtsaanduiding=None,
geslachtsnaam='Testpersoon',
handlichting=None,
huwelijksdatum=None,
naam='Testpersoon B.V.',
nummer=None,
ookgenoemd=None,
persoonsrechtsvorm='Eenmanszaak',
redeninsolvatie=None,
rsin='000000001',
soort=None,
status=None,
toegangscode=None,
typering='natuurlijkPersoon',
uitgebreiderechtsvorm='Eenmanszaak',
verkortenaam=None,
volledigenaam='Maarten Testpersoon',
voornamen='Maarten',
voorvoegselgeslachtsnaam=None,
rechtsvorm=None,
doelrechtsvorm=None,
rol='EIGENAAR'
)
persoon = self._convert(p)
self.assertIsNotNone(persoon)
self.assertEqual(Decimal('200000000000000000'), persoon.id)
self.assertEqual(False, persoon.faillissement)
self.assertEqual('Eenmanszaak', persoon.rechtsvorm)
self.assertEqual('natuurlijkPersoon', persoon.typering)
self.assertEqual('Eenmanszaak', persoon.uitgebreide_rechtsvorm)
self.assertEqual('Maarten Testpersoon', persoon.volledige_naam)
self.assertEqual('EIGENAAR', persoon.rol)
| mpl-2.0 | 7,984,982,030,897,106,000 | 33.304348 | 98 | 0.656781 | false |
hsuchie4/TACTIC | 3rd_party/CherryPy/cherrypy/test/test_static.py | 6 | 10954 | from cherrypy.test import test
test.prefer_parent_path()
from httplib import HTTPConnection, HTTPSConnection
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
has_space_filepath = os.path.join(curdir, 'static', 'has space.html')
bigfile_filepath = os.path.join(curdir, "static", "bigfile.log")
BIGFILE_SIZE = 1024 * 1024
import threading
import cherrypy
from cherrypy.lib import static
def setup_server():
if not os.path.exists(has_space_filepath):
open(has_space_filepath, 'wb').write('Hello, world\r\n')
if not os.path.exists(bigfile_filepath):
open(bigfile_filepath, 'wb').write("x" * BIGFILE_SIZE)
class Root:
def bigfile(self):
from cherrypy.lib import static
self.f = static.serve_file(bigfile_filepath)
return self.f
bigfile.exposed = True
bigfile._cp_config = {'response.stream': True}
def tell(self):
if self.f.input.closed:
return ''
return repr(self.f.input.tell()).rstrip('L')
tell.exposed = True
def fileobj(self):
f = open(os.path.join(curdir, 'style.css'), 'rb')
return static.serve_fileobj(f, content_type='text/css')
fileobj.exposed = True
def stringio(self):
f = StringIO.StringIO('Fee\nfie\nfo\nfum')
return static.serve_fileobj(f, content_type='text/plain')
stringio.exposed = True
class Static:
def index(self):
return 'You want the Baron? You can have the Baron!'
index.exposed = True
def dynamic(self):
return "This is a DYNAMIC page"
dynamic.exposed = True
root = Root()
root.static = Static()
rootconf = {
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
},
'/style.css': {
'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(curdir, 'style.css'),
},
'/docroot': {
'tools.staticdir.on': True,
'tools.staticdir.root': curdir,
'tools.staticdir.dir': 'static',
'tools.staticdir.index': 'index.html',
},
'/error': {
'tools.staticdir.on': True,
'request.show_tracebacks': True,
},
}
rootApp = cherrypy.Application(root)
rootApp.merge(rootconf)
test_app_conf = {
'/test': {
'tools.staticdir.index': 'index.html',
'tools.staticdir.on': True,
'tools.staticdir.root': curdir,
'tools.staticdir.dir': 'static',
},
}
testApp = cherrypy.Application(Static())
testApp.merge(test_app_conf)
vhost = cherrypy._cpwsgi.VirtualHost(rootApp, {'virt.net': testApp})
cherrypy.tree.graft(vhost)
def teardown_server():
for f in (has_space_filepath, bigfile_filepath):
if os.path.exists(f):
try:
os.unlink(f)
except:
pass
from cherrypy.test import helper
class StaticTest(helper.CPWebCase):
def testStatic(self):
self.getPage("/static/index.html")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html')
self.assertBody('Hello, world\r\n')
# Using a staticdir.root value in a subdir...
self.getPage("/docroot/index.html")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html')
self.assertBody('Hello, world\r\n')
# Check a filename with spaces in it
self.getPage("/static/has%20space.html")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html')
self.assertBody('Hello, world\r\n')
self.getPage("/style.css")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/css')
# Note: The body should be exactly 'Dummy stylesheet\n', but
# unfortunately some tools such as WinZip sometimes turn \n
# into \r\n on Windows when extracting the CherryPy tarball so
# we just check the content
self.assertMatchesBody('^Dummy stylesheet')
def test_fallthrough(self):
# Test that NotFound will then try dynamic handlers (see [878]).
self.getPage("/static/dynamic")
self.assertBody("This is a DYNAMIC page")
# Check a directory via fall-through to dynamic handler.
self.getPage("/static/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('You want the Baron? You can have the Baron!')
def test_index(self):
# Check a directory via "staticdir.index".
self.getPage("/docroot/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html')
self.assertBody('Hello, world\r\n')
# The same page should be returned even if redirected.
self.getPage("/docroot")
self.assertStatus(301)
self.assertHeader('Location', '%s/docroot/' % self.base())
self.assertMatchesBody("This resource .* <a href='%s/docroot/'>"
"%s/docroot/</a>." % (self.base(), self.base()))
def test_config_errors(self):
# Check that we get an error if no .file or .dir
self.getPage("/error/thing.html")
self.assertErrorPage(500)
self.assertInBody("TypeError: staticdir() takes at least 2 "
"arguments (0 given)")
def test_security(self):
# Test up-level security
self.getPage("/static/../../test/style.css")
self.assertStatus((400, 403))
def test_modif(self):
# Test modified-since on a reasonably-large file
self.getPage("/static/dirback.jpg")
self.assertStatus("200 OK")
lastmod = ""
for k, v in self.headers:
if k == 'Last-Modified':
lastmod = v
ims = ("If-Modified-Since", lastmod)
self.getPage("/static/dirback.jpg", headers=[ims])
self.assertStatus(304)
self.assertNoHeader("Content-Type")
self.assertNoHeader("Content-Length")
self.assertNoHeader("Content-Disposition")
self.assertBody("")
def test_755_vhost(self):
self.getPage("/test/", [('Host', 'virt.net')])
self.assertStatus(200)
self.getPage("/test", [('Host', 'virt.net')])
self.assertStatus(301)
self.assertHeader('Location', self.scheme + '://virt.net/test/')
def test_serve_fileobj(self):
self.getPage("/fileobj")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/css;charset=utf-8')
self.assertMatchesBody('^Dummy stylesheet')
def test_serve_stringio(self):
self.getPage("/stringio")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/plain;charset=utf-8')
self.assertHeader('Content-Length', 14)
self.assertMatchesBody('Fee\nfie\nfo\nfum')
def test_file_stream(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Make an initial request
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/bigfile", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
body = ''
remaining = BIGFILE_SIZE
while remaining > 0:
data = response.fp.read(65536)
if not data:
break
body += data
remaining -= len(data)
if self.scheme == "https":
newconn = HTTPSConnection
else:
newconn = HTTPConnection
s, h, b = helper.webtest.openURL(
"/tell", headers=[], host=self.HOST, port=self.PORT,
http_conn=newconn)
if not b:
# The file was closed on the server.
tell_position = BIGFILE_SIZE
else:
tell_position = int(b)
expected = len(body)
if tell_position >= BIGFILE_SIZE:
# We can't exactly control how much content the server asks for.
# Fudge it by only checking the first half of the reads.
if expected < (BIGFILE_SIZE / 2):
self.fail(
"The file should have advanced to position %r, but has "
"already advanced to the end of the file. It may not be "
"streamed as intended, or at the wrong chunk size (64k)" %
expected)
elif tell_position < expected:
self.fail(
"The file should have advanced to position %r, but has "
"only advanced to position %r. It may not be streamed "
"as intended, or at the wrong chunk size (65536)" %
(expected, tell_position))
if body != "x" * BIGFILE_SIZE:
self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
(BIGFILE_SIZE, body[:50], len(body)))
conn.close()
def test_file_stream_deadlock(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Make an initial request but abort early.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/bigfile", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
body = response.fp.read(65536)
if body != "x" * 65536:
self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
(65536, body[:50], len(body)))
response.close()
conn.close()
# Make a second request, which should fetch the whole file.
self.persistent = False
self.getPage("/bigfile")
if self.body != "x" * BIGFILE_SIZE:
self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." %
(BIGFILE_SIZE, self.body[:50], len(body)))
if __name__ == "__main__":
helper.testmain()
| epl-1.0 | -6,474,439,104,509,139,000 | 34.564935 | 82 | 0.553131 | false |
reading-website/udacity | nanodegree/c-ud032-nd/lesson4/Tires_on_MongoDB.py | 2 | 1285 | '''
Created on Jun 11, 2015
@author: ramkamal.tripathi
'''
"""
Your task is to sucessfully run the exercise to see how pymongo works
and how easy it is to start using it.
You don't actually have to change anything in this exercise,
but you can change the city name in the add_city function if you like.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB (see Instructor comments for link to installation information)
and uncomment the get_db function.
"""
def add_city(db):
# Changes to this function will be reflected in the output.
# All other functions are for local use only.
# Try changing the name of the city to be inserted
    db.cities.insert({"name" : "new jersey"})
def get_city(db):
return db.cities.find_one()
def get_db():
# For local use
from pymongo import MongoClient
client = MongoClient('localhost:3129')
# 'examples' here is the database name. It will be created if it does not exist.
db = client.examples
return db
if __name__ == "__main__":
# For local use
db = get_db() # uncomment this line if you want to run this locally
add_city(db)
print get_city(db) | mit | 8,051,048,958,920,369,000 | 31 | 90 | 0.684047 | false |
nop33/indico-plugins | piwik/indico_piwik/piwik.py | 1 | 3260 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import socket
from urllib2 import urlparse
from flask_pluginengine import current_plugin
import requests
class PiwikRequest(object):
"""Wrapper for Piwik API requests"""
def __init__(self, server_url, query_script, site_id, api_token=None):
if not server_url:
raise ValueError("server_url can't be empty")
if not query_script:
raise ValueError("query_script can't be empty")
if not site_id:
raise ValueError("site_id can't be empty")
self.server_url = server_url if server_url.endswith('/') else server_url + '/'
self.query_script = query_script
self.site_id = site_id
self.api_token = api_token
@property
def api_url(self):
url = urlparse.urlparse(self.server_url)
scheme = url.scheme if url.scheme else 'https'
return '{}://{}{}{}'.format(scheme, url.netloc, url.path, self.query_script)
def call(self, default_response=None, **query_params):
"""Perform a query to the Piwik server and return the response.
:param default_response: Return value in case the query fails
:param query_params: Dictionary with the parameters of the query
"""
query_url = self.get_query_url(**query_params)
return self._perform_call(query_url, default_response)
def get_query(self, query_params=None):
"""Return a query string"""
if query_params is None:
query_params = {}
query = ''
query_params['idSite'] = self.site_id
if self.api_token is not None:
query_params['token_auth'] = self.api_token
for key, value in query_params.iteritems():
if isinstance(value, list):
value = ','.join(value)
query += '{}={}&'.format(str(key), str(value))
return query[:-1]
def get_query_url(self, **query_params):
"""Return the url for a Piwik API query"""
return '{}?{}'.format(self.api_url, self.get_query(query_params))
def _perform_call(self, query_url, default_response=None, timeout=10):
"""Returns the raw results from the API"""
try:
response = requests.get(query_url, timeout=timeout)
except socket.timeout:
current_plugin.logger.warning("Timeout contacting Piwik server")
return default_response
except Exception:
current_plugin.logger.exception("Unable to connect")
return default_response
return response.content
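# --- Added usage sketch (illustration only): the server URL, site id, token
# and the 'VisitsSummary.getVisits' API method below are placeholders, not
# values used by the plugin itself.
#
#   request = PiwikRequest(server_url='https://piwik.example.com/',
#                          query_script='index.php',
#                          site_id=1,
#                          api_token='0123456789abcdef')
#   raw = request.call(default_response='{}',
#                      module='API',
#                      method='VisitsSummary.getVisits',
#                      period='day', date='today', format='JSON')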
| gpl-3.0 | 2,523,016,141,578,782,000 | 38.756098 | 86 | 0.646012 | false |
tensorflow/agents | tf_agents/drivers/dynamic_episode_driver_test.py | 1 | 10431 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.drivers.dynamic_episode_driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.environments import environment_utilities
from tf_agents.bandits.environments import stationary_stochastic_py_environment as sspe
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.drivers import test_utils as driver_test_utils
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import test_utils
class DynamicEpisodeDriverTest(test_utils.TestCase):
def testPolicyState(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
num_episodes_observer = driver_test_utils.NumEpisodesObserver()
num_steps_observer = driver_test_utils.NumStepsObserver()
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env, policy, observers=[num_episodes_observer, num_steps_observer])
run_driver = driver.run()
self.evaluate(tf.compat.v1.global_variables_initializer())
time_step, policy_state = self.evaluate(run_driver)
self.assertEqual(time_step.step_type, 0)
self.assertEqual(policy_state, [3])
  def testContinuePreviousRun(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
num_episodes_observer = driver_test_utils.NumEpisodesObserver()
num_steps_observer = driver_test_utils.NumStepsObserver()
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env, policy, observers=[num_episodes_observer, num_steps_observer])
time_step, policy_state = driver.run()
time_step, policy_state = driver.run(time_step, policy_state)
self.evaluate(tf.compat.v1.global_variables_initializer())
time_step, policy_state = self.evaluate([time_step, policy_state])
self.assertEqual(time_step.step_type, 0)
self.assertEqual(policy_state, [3])
def testOneStepUpdatesObservers(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
num_episodes_observer = driver_test_utils.NumEpisodesObserver()
num_steps_observer = driver_test_utils.NumStepsObserver()
num_steps_transition_observer = (
driver_test_utils.NumStepsTransitionObserver())
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env,
policy,
observers=[num_episodes_observer, num_steps_observer],
transition_observers=[num_steps_transition_observer])
self.evaluate(tf.compat.v1.global_variables_initializer())
for _ in range(5):
self.evaluate(driver.run())
self.assertEqual(self.evaluate(num_episodes_observer.num_episodes), 5)
# Two steps per episode.
self.assertEqual(self.evaluate(num_steps_observer.num_steps), 10)
self.assertEqual(self.evaluate(num_steps_transition_observer.num_steps), 10)
def testMultiStepUpdatesObservers(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
num_episodes_observer = driver_test_utils.NumEpisodesObserver()
num_steps_observer = driver_test_utils.NumStepsObserver()
num_steps_transition_observer = (
driver_test_utils.NumStepsTransitionObserver())
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env,
policy,
observers=[num_episodes_observer, num_steps_observer],
transition_observers=[num_steps_transition_observer])
run_driver = driver.run(num_episodes=5)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(run_driver)
self.assertEqual(self.evaluate(num_episodes_observer.num_episodes), 5)
# Two steps per episode.
self.assertEqual(self.evaluate(num_steps_observer.num_steps), 10)
self.assertEqual(self.evaluate(num_steps_transition_observer.num_steps), 10)
def testTwoStepObservers(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
num_episodes_observer0 = driver_test_utils.NumEpisodesObserver(
variable_scope='observer0')
num_episodes_observer1 = driver_test_utils.NumEpisodesObserver(
variable_scope='observer1')
num_steps_transition_observer = (
driver_test_utils.NumStepsTransitionObserver())
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env,
policy,
num_episodes=5,
observers=[num_episodes_observer0, num_episodes_observer1],
transition_observers=[num_steps_transition_observer])
run_driver = driver.run()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(run_driver)
self.assertEqual(self.evaluate(num_episodes_observer0.num_episodes), 5)
self.assertEqual(self.evaluate(num_episodes_observer1.num_episodes), 5)
self.assertEqual(self.evaluate(num_steps_transition_observer.num_steps), 10)
def testOneStepReplayBufferObservers(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
replay_buffer = driver_test_utils.make_replay_buffer(policy)
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env, policy, num_episodes=1, observers=[replay_buffer.add_batch])
run_driver = driver.run if tf.executing_eagerly() else driver.run()
self.evaluate(tf.compat.v1.global_variables_initializer())
for _ in range(3):
self.evaluate(run_driver)
trajectories = self.evaluate(replay_buffer.gather_all())
self.assertAllEqual(trajectories.step_type, [[0, 1, 2, 0, 1, 2, 0, 1, 2]])
self.assertAllEqual(trajectories.action, [[1, 2, 1, 1, 2, 1, 1, 2, 1]])
self.assertAllEqual(trajectories.observation, [[0, 1, 3, 0, 1, 3, 0, 1, 3]])
self.assertAllEqual(trajectories.policy_info, [[2, 4, 2, 2, 4, 2, 2, 4, 2]])
self.assertAllEqual(trajectories.next_step_type,
[[1, 2, 0, 1, 2, 0, 1, 2, 0]])
self.assertAllEqual(trajectories.reward,
[[1., 1., 0., 1., 1., 0., 1., 1., 0.]])
self.assertAllEqual(trajectories.discount,
[[1., 0., 1, 1, 0, 1., 1., 0., 1.]])
def testMultiStepReplayBufferObservers(self):
env = tf_py_environment.TFPyEnvironment(
driver_test_utils.PyEnvironmentMock())
policy = driver_test_utils.TFPolicyMock(env.time_step_spec(),
env.action_spec())
replay_buffer = driver_test_utils.make_replay_buffer(policy)
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env, policy, num_episodes=3, observers=[replay_buffer.add_batch])
run_driver = driver.run()
rb_gather_all = replay_buffer.gather_all()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(run_driver)
trajectories = self.evaluate(rb_gather_all)
self.assertAllEqual(trajectories.step_type, [[0, 1, 2, 0, 1, 2, 0, 1, 2]])
self.assertAllEqual(trajectories.action, [[1, 2, 1, 1, 2, 1, 1, 2, 1]])
self.assertAllEqual(trajectories.observation, [[0, 1, 3, 0, 1, 3, 0, 1, 3]])
self.assertAllEqual(trajectories.policy_info, [[2, 4, 2, 2, 4, 2, 2, 4, 2]])
self.assertAllEqual(trajectories.next_step_type,
[[1, 2, 0, 1, 2, 0, 1, 2, 0]])
self.assertAllEqual(trajectories.reward,
[[1., 1., 0., 1., 1., 0., 1., 1., 0.]])
self.assertAllEqual(trajectories.discount,
[[1., 0., 1., 1., 0., 1., 1., 0., 1.]])
def testBanditEnvironment(self):
def _context_sampling_fn():
return np.array([[5, -5], [2, -2]])
reward_fns = [
environment_utilities.LinearNormalReward(theta, sigma=0.0)
for theta in ([1, 0], [0, 1])
]
batch_size = 2
py_env = sspe.StationaryStochasticPyEnvironment(
_context_sampling_fn, reward_fns, batch_size=batch_size)
env = tf_py_environment.TFPyEnvironment(py_env)
policy = random_tf_policy.RandomTFPolicy(env.time_step_spec(),
env.action_spec())
steps_per_loop = 4
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=policy.trajectory_spec,
batch_size=batch_size,
max_length=steps_per_loop)
driver = dynamic_episode_driver.DynamicEpisodeDriver(
env,
policy,
num_episodes=steps_per_loop * batch_size,
observers=[replay_buffer.add_batch])
run_driver = driver.run()
rb_gather_all = replay_buffer.gather_all()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(run_driver)
trajectories = self.evaluate(rb_gather_all)
self.assertAllEqual(trajectories.step_type, [[0, 0, 0, 0], [0, 0, 0, 0]])
self.assertAllEqual(trajectories.next_step_type,
[[2, 2, 2, 2], [2, 2, 2, 2]])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -7,286,345,521,495,496,000 | 40.229249 | 87 | 0.669926 | false |
kevinzhang1986/codezero | scripts/loader/generate_loader_asm.py | 3 | 2782 | #! /usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
#
# Codezero -- a microkernel for embedded systems.
#
# Copyright © 2009 B Labs Ltd
#
import os, sys, shelve, subprocess
from os.path import join
PROJRELROOT = '../../'
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), PROJRELROOT)))
from scripts.config.projpaths import *
from scripts.config.configuration import *
from scripts.config.lib import *
from scripts.config.config_invoke import *
config = configuration_retrieve()
# Convert address from python literal to numeric value
def address_remove_literal(address):
value = hex(int(address, 16) - 0xf0000000)
if value[-1] in ['l', 'L']:
value = value[:-1]
return value
ksym_header = \
'''
/*
* %s autogenerated from %s.
* by %s
*
* This file is included by the loader sources so that any
* kernel symbol address can be known in advance and stopped
* at by debuggers before virtual memory is enabled.
*/
'''
assembler_symbol_definition = \
'''
.section .text
.align 4
.global %s
.type %s, function
.equ %s, %s
'''
def generate_ksym_to_loader(target_path, source_path):
symbols = ['break_virtual']
with open(target_path, 'w') as asm_file:
asm_file.write(ksym_header % (target_path, source_path, sys.argv[0]))
for symbol in symbols:
process = \
subprocess.Popen(config.toolchain_kernel + 'objdump -d ' + \
source_path + ' | grep "<' + \
symbol + '>"', shell=True, \
stdout=subprocess.PIPE)
assert process.wait() == 0
address, name = process.stdout.read().split()
assert '<' + symbol + '>:' == name
asm_file.write(assembler_symbol_definition % \
(symbol, symbol, symbol, \
address_remove_literal(address)))
decl_sect_asm = \
'''
.align 4
.section %s
.incbin "%s"
'''
def generate_image_S(target_path, images):
kern_fname = 'kernel.elf'
conts_fname = 'containers.elf'
fbody = ''
with open(target_path, 'w+') as images_S:
for img in images:
if os.path.basename(img.path) == kern_fname:
fbody += decl_sect_asm % ('.kernel', img)
if os.path.basename(img.path) == conts_fname:
fbody += decl_sect_asm % ('.containers', img)
images_S.write(fbody)
if __name__ == "__main__":
if len(sys.argv) == 1:
generate_ksym_to_loader(join(PROJROOT, 'loader/ksyms.S'), KERNEL_ELF)
elif len(sys.argv) == 3:
        generate_ksym_to_loader(sys.argv[1], sys.argv[2])
else:
print "Usage: %s <asm filename> <kernel image filename>" % sys.argv[0]
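# --- Added illustrative note: for a kernel symbol 'break_virtual' that objdump
# reports at virtual address 0xf0012345 (a made-up value), the generated
# ksyms.S entry would look roughly like:
#
#   .section .text
#   .align 4
#   .global break_virtual
#   .type break_virtual, function
#   .equ break_virtual, 0x12345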
| gpl-3.0 | -4,357,346,402,972,801,500 | 28.903226 | 86 | 0.583243 | false |
jrha/aquilon | tests/broker/test_show_fqdn.py | 2 | 1884 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the show fqdn command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestShowFqdn(TestBrokerCommand):
def testshowfqdnall(self):
command = "show fqdn --all"
(out, err) = self.successtest(command.split(" "))
# The aq client does not ask for this...
#self.matchoutput(err, "The show_fqdn command is deprecated.", command)
# Chassis
self.matchoutput(out, "ut3c1.aqd-unittest.ms.com", command)
# TorSwitch
self.matchoutput(out, "ut3gd1r01.aqd-unittest.ms.com", command)
# Aurora Host
self.matchoutput(out, "pissp1.ms.com", command)
# Aquilon Host
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
# Auxiliary
self.matchoutput(out, "unittest00-e1.one-nyp.ms.com", command)
# Windows Host
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestShowFqdn)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | 906,808,777,873,243,900 | 34.54717 | 79 | 0.685775 | false |
simphony/simphony-openfoam | foam_internalwrapper/mesh_utils.py | 1 | 6310 | """ Utility functions for FoamMesh class
"""
import simphonyfoaminterface as foamface
from simphony.core.cuba import CUBA
from simphony.cuds.mesh import Cell
from simphony.cuds.meta.api import PhaseVolumeFraction
from foam_controlwrapper.foam_variables import (dataTypeMap, dataKeyMap,
phaseNames)
from foam_controlwrapper.foam_variables import dataDimensionMap
from foam_controlwrapper.foam_variables import cellDataTypes
def create_dummy_celldata(name, data_name, io=False):
"""Creates dummy cell data to OpenFoams objectRegistry and
writes to case directory if path defined
Parameters
----------
name : str
Name of mesh
data_name : str
Name of data to be created
io : boolean
if True write data to disk
"""
nCells = foamface.getCellCount(name)
iomode = 1 if io else 0
if dataTypeMap[dataKeyMap[data_name]] in cellDataTypes:
dimension = dataDimensionMap[dataKeyMap[data_name]]
if dataTypeMap[dataKeyMap[data_name]] == 'vector':
values = [0.0] * (3 * nCells)
foamface.setAllCellVectorData(name, data_name, iomode, values,
dimension)
elif dataTypeMap[dataKeyMap[data_name]] == 'scalar':
values = [0.0] * nCells
if dataKeyMap[data_name] == CUBA.VOLUME_FRACTION:
foamface.setAllCellData(name,
data_name + '.' + phaseNames[0],
iomode, values, dimension)
else:
foamface.setAllCellData(name, data_name, iomode, values,
dimension)
elif dataTypeMap[dataKeyMap[data_name]] == 'tensor':
values = [0.0] * (nCells * 9)
foamface.setAllCellTensorData(name, data_name, iomode, values,
dimension)
def set_cells_data(name, cells, dataNameKeyMap, materials, io=False):
"""Set data to specific cells
Parameters
----------
name : str
name of mesh
cells : list Cell
list of Cells
dataNameKeyMap : dictionary
variables name map to CUBA keys (only variables to be saved)
materials: dictionary
map from phase name to material
io : boolean
if True write data to disk
"""
iomode = 1 if io else 0
for dName in dataNameKeyMap:
dataName, _, _ = dName.partition('.')
dataKey = dataNameKeyMap[dataName]
dimension = dataDimensionMap[dataKey]
if dataTypeMap[dataKey] in cellDataTypes:
if dataTypeMap[dataKey] == "scalar":
data = []
if dataKey == CUBA.VOLUME_FRACTION:
material1 = materials[phaseNames[0]]
for cell in cells:
if cell.data[dataKey][0].material == material1:
data.append(cell.data[dataKey][0]
.volume_fraction)
else:
data.append(cell.data[dataKey][1]
.volume_fraction)
dName = dataName + '.' + phaseNames[0]
foamface.setAllCellData(name, dName, iomode, data,
dimension)
else:
for cell in cells:
data.append(cell.data[dataKey])
foamface.setAllCellData(name, dataName, iomode,
data, dimension)
elif dataTypeMap[dataKey] == "vector":
data = []
for cell in cells:
for val in cell.data[dataKey]:
data.append(val)
foamface.setAllCellVectorData(name, dataName, iomode,
data, dimension)
elif dataTypeMap[dataKey] == "tensor":
data = []
for cell in cells:
for val in cell.data[dataKey]:
data.append(val)
foamface.setAllCellTensorData(name, dataName, iomode,
data, dimension)
def get_cells_in_range(args):
""" get list of cells on given label range
Parameters
----------
args: list
list of parameters
args[0] - cell start label
args[1] - cell end label
args[2] - packed list of all cells point indices
args[3] - datamap (key, data) pair
args[4] - foamCellLabelToUuid
args[5] - foamPhaseNameToMaterial
"""
cell_start = args[0]
cell_end = args[1]
cells_puids = args[2]
data_map = args[3]
foamCellLabelToUuid = args[4]
foamPhaseNameToMaterial = args[5]
cells = []
for cell_label in range(cell_start, cell_end + 1, 1):
cell = Cell(cells_puids[cell_label],
foamCellLabelToUuid[cell_label])
for dataKey, data in data_map.iteritems():
if dataTypeMap[dataKey] == "scalar":
if dataKey == CUBA.VOLUME_FRACTION:
if foamPhaseNameToMaterial:
material1 = foamPhaseNameToMaterial[
phaseNames[0]]
material2 = foamPhaseNameToMaterial[
phaseNames[1]]
vol_frac1 = data[cell_label]
phase1_vol_frac = PhaseVolumeFraction(
material1, vol_frac1)
phase2_vol_frac = PhaseVolumeFraction(
material2, 1 - vol_frac1)
cell.data[dataKey] = [phase1_vol_frac,
phase2_vol_frac]
else:
cell.data[dataKey] = data[cell_label]
elif dataTypeMap[dataKey] == "vector":
cell.data[dataKey] = \
[data[cell_label * 3 + k] for k in range(3)]
elif dataTypeMap[dataKey] == "tensor":
cell.data[dataKey] = \
[data[cell_label * 9 + k] for k in range(9)]
cells.append(cell)
return cells
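# --- Added usage sketch (illustration only): the labels, CUBA key and data
# below are assumptions; in practice these values are assembled by the
# FoamMesh wrapper before calling get_cells_in_range.
#
#   args = [0,                                  # first cell label in the range
#           9,                                  # last cell label in the range
#           cells_puids,                        # packed point uids of all cells
#           {CUBA.PRESSURE: pressure_values},   # CUBA key -> flat value list
#           foam_cell_label_to_uuid,
#           foam_phase_name_to_material]
#   cells = get_cells_in_range(args)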
| gpl-2.0 | 8,249,469,313,158,423,000 | 38.192547 | 74 | 0.514263 | false |