repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/mapping_tests.py
|
4
|
22268
|
# tests common to dict and UserDict
import unittest
import collections
import sys
class BasicTestMappingProtocol(unittest.TestCase):
# This base class can be used to check that an object conforms to the
# mapping protocol
# Functions that can be useful to override to adapt to dictionary
# semantics
type2test = None # which class is being tested (overwrite in subclasses)
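# For example (illustrative only, not part of this file), a subclass that
# exercises the built-in dict would simply set the attribute:
#
#     class GeneralMappingTests(BasicTestMappingProtocol):
#         type2test = dict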
def _reference(self):
"""Return a dictionary of values which are invariant by storage
in the object under test."""
return {"1": "2", "key1":"value1", "key2":(1,2,3)}
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key:value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key:value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = list(self.other.keys())[0]
self.assertRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#__contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
#cmp
self.assertEqual(p, p)
self.assertEqual(d, d)
self.assertNotEqual(p, d)
self.assertNotEqual(d, p)
#bool
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, '__next__'))
self.assertTrue(hasattr(iter, '__iter__'))
x = list(iter)
self.assertTrue(set(x)==set(lst)==set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()),
self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()),
self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()),
self.reference.items())
#get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
#Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.assertRaises(KeyError, lambda:p[key])
p = self._empty_mapping()
#update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = list(p.items())
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
#setdefault
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
#pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.assertNotIn(knownkey, d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.assertNotIn(knownkey, d)
self.assertEqual(d.pop(knownkey, default), default)
#popitem
key, value = d.popitem()
self.assertNotIn(key, d)
self.assertEqual(value, self.reference[key])
p=self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assertTrue(not self._empty_mapping())
self.assertTrue(self.reference)
self.assertTrue(bool(self._empty_mapping()) is False)
self.assertTrue(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(list(self.inmapping.keys())[0], d.keys())
self.assertNotIn(list(self.other.keys())[0], d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(list(d.values()), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(list(d.items()), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(d[list(self.inmapping.keys())[0]],
list(self.inmapping.values())[0])
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted(d.items())
i2 = sorted(self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception): pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
d = self.reference
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
self.assertEqual(d.get(list(self.inmapping.keys())[0]),
list(self.inmapping.values())[0])
self.assertEqual(d.get(list(self.inmapping.keys())[0], 3),
list(self.inmapping.values())[0])
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = list(self.inmapping.items())[0]
d[k] = v
self.assertRaises(KeyError, d.pop, list(self.other.keys())[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
class TestMappingProtocol(BasicTestMappingProtocol):
def test_constructor(self):
BasicTestMappingProtocol.test_constructor(self)
self.assertTrue(self._empty_mapping() is not self._empty_mapping())
self.assertEqual(self.type2test(x=1, y=2), {"x": 1, "y": 2})
def test_bool(self):
BasicTestMappingProtocol.test_bool(self)
self.assertTrue(not self._empty_mapping())
self.assertTrue(self._full_mapping({"x": "y"}))
self.assertTrue(bool(self._empty_mapping()) is False)
self.assertTrue(bool(self._full_mapping({"x": "y"})) is True)
def test_keys(self):
BasicTestMappingProtocol.test_keys(self)
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
self.assertIn('a', k)
self.assertIn('b', k)
self.assertNotIn('c', k)
def test_values(self):
BasicTestMappingProtocol.test_values(self)
d = self._full_mapping({1:2})
self.assertEqual(list(d.values()), [2])
def test_items(self):
BasicTestMappingProtocol.test_items(self)
d = self._full_mapping({1:2})
self.assertEqual(list(d.items()), [(1, 2)])
def test_contains(self):
d = self._empty_mapping()
self.assertNotIn('a', d)
self.assertTrue(not ('a' in d))
self.assertTrue('a' not in d)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
BasicTestMappingProtocol.test_len(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
BasicTestMappingProtocol.test_getitem(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
def test_clear(self):
d = self._full_mapping({1:1, 2:2, 3:3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
BasicTestMappingProtocol.test_update(self)
# mapping argument
d = self._empty_mapping()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
# no argument
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
# keyword arguments
d = self._empty_mapping()
d.update(x=100)
d.update(y=20)
d.update(x=1, y=2, z=3)
self.assertEqual(d, {"x":1, "y":2, "z":3})
# item sequence
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)])
self.assertEqual(d, {"x":100, "y":20})
# Both item sequence and keyword arguments
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)], x=1, y=2)
self.assertEqual(d, {"x":1, "y":2})
# iterator
d = self._full_mapping({1:3, 2:4})
d.update(self._full_mapping({1:2, 3:4, 5:6}).items())
self.assertEqual(d, {1:2, 2:4, 3:4, 5:6})
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
def test_fromkeys(self):
self.assertEqual(self.type2test.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._empty_mapping()
self.assertTrue(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(self.type2test): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assertTrue(dictlike.fromkeys('a').__class__ is dictlike)
self.assertTrue(dictlike().fromkeys('a').__class__ is dictlike)
self.assertTrue(type(dictlike.fromkeys('a')) is dictlike)
class mydict(self.type2test):
def __new__(cls):
return collections.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, collections.UserDict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(self.type2test):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, self.type2test.fromkeys, BadSeq())
class baddict2(self.type2test):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
def test_copy(self):
d = self._full_mapping({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
d = self._empty_mapping()
self.assertEqual(d.copy(), d)
self.assertIsInstance(d.copy(), d.__class__)
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
BasicTestMappingProtocol.test_get(self)
d = self._empty_mapping()
self.assertTrue(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = self._full_mapping({'a' : 1, 'b' : 2})
self.assertTrue(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
def test_setdefault(self):
BasicTestMappingProtocol.test_setdefault(self)
d = self._empty_mapping()
self.assertTrue(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assertTrue(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
def test_popitem(self):
BasicTestMappingProtocol.test_popitem(self)
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = self._empty_mapping()
b = self._empty_mapping()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assertTrue(not(copymode < 0 and ta != tb))
self.assertTrue(not a)
self.assertTrue(not b)
def test_pop(self):
BasicTestMappingProtocol.test_pop(self)
# Tests for pop with specified key
d = self._empty_mapping()
k, v = 'abc', 'def'
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
class TestHashMappingProtocol(TestMappingProtocol):
def test_getitem(self):
TestMappingProtocol.test_getitem(self)
class Exc(Exception): pass
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = self._empty_mapping()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_fromkeys(self):
TestMappingProtocol.test_fromkeys(self)
class mydict(self.type2test):
def __new__(cls):
return collections.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, collections.UserDict)
def test_pop(self):
TestMappingProtocol.test_pop(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = self._empty_mapping()
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = self._empty_mapping()
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = self._empty_mapping()
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = self._full_mapping({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_repr_deep(self):
d = self._empty_mapping()
for i in range(sys.getrecursionlimit() + 100):
d0 = d
d = self._empty_mapping()
d[1] = d0
self.assertRaises(RecursionError, repr, d)
def test_eq(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
self.assertEqual(self._full_mapping({1: 2}),
self._full_mapping({1: 2}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 1
d1 = self._full_mapping({BadCmp(): 1})
d2 = self._full_mapping({1: 1})
self.assertRaises(Exc, lambda: BadCmp()==1)
self.assertRaises(Exc, lambda: d1==d2)
def test_setdefault(self):
TestMappingProtocol.test_setdefault(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
|
apache-2.0
|
Taapat/enigma2-openpli-fulan
|
lib/python/Components/HdmiCec.py
|
1
|
14921
|
import struct, os, time
from config import config, ConfigSelection, ConfigYesNo, ConfigSubsection, ConfigText
from enigma import eHdmiCEC, eActionMap
from Components.VolumeControl import VolumeControl
from Tools.StbHardware import getFPWasTimerWakeup
from enigma import eTimer
from Screens import Standby
from Tools import Directories, Notifications
from time import time
import xml.etree.cElementTree
config.hdmicec = ConfigSubsection()
config.hdmicec.enabled = ConfigYesNo(default = True)
config.hdmicec.control_tv_standby = ConfigYesNo(default = True)
config.hdmicec.control_tv_wakeup = ConfigYesNo(default = True)
config.hdmicec.report_active_source = ConfigYesNo(default = True)
config.hdmicec.report_active_menu = ConfigYesNo(default = True)
config.hdmicec.handle_tv_standby = ConfigYesNo(default = True)
config.hdmicec.handle_tv_wakeup = ConfigYesNo(default = True)
config.hdmicec.tv_wakeup_detection = ConfigSelection(
choices = {
"wakeup": _("Wakeup"),
"tvreportphysicaladdress": _("TV physical address report"),
"sourcerequest": _("Source request"),
"streamrequest": _("Stream request"),
"osdnamerequest": _("OSD name request"),
"vendorid": _("Vendor ID (LG)"),
"activity": _("Any activity"),
},
default = "streamrequest")
config.hdmicec.fixed_physical_address = ConfigText(default = "0.0.0.0")
config.hdmicec.volume_forwarding = ConfigYesNo(default = False)
config.hdmicec.control_receiver_wakeup = ConfigYesNo(default = False)
config.hdmicec.control_receiver_standby = ConfigYesNo(default = False)
config.hdmicec.handle_deepstandby_events = ConfigYesNo(default = False)
choicelist = []
for i in (10, 50, 100, 150, 250, 500, 750, 1000):
choicelist.append(("%d" % i, _("%d ms") % i))
config.hdmicec.minimum_send_interval = ConfigSelection(default = "0", choices = [("0", _("Disabled"))] + choicelist)
choicelist = []
for i in [3] + range(5, 65, 5):
choicelist.append(("%d" % i, _("%d sec") % i))
config.hdmicec.repeat_wakeup_timer = ConfigSelection(default = "3", choices = [("0", _("Disabled"))] + choicelist)
class HdmiCec:
instance = None
def __init__(self):
assert not HdmiCec.instance, "only one HdmiCec instance is allowed!"
HdmiCec.instance = self
self.wait = eTimer()
self.wait.timeout.get().append(self.sendCmd)
self.repeat = eTimer()
self.repeat.timeout.get().append(self.wakeupMessages)
self.queue = []
eHdmiCEC.getInstance().messageReceived.get().append(self.messageReceived)
config.misc.standbyCounter.addNotifier(self.onEnterStandby, initial_call = False)
config.misc.DeepStandby.addNotifier(self.onEnterDeepStandby, initial_call = False)
self.setFixedPhysicalAddress(config.hdmicec.fixed_physical_address.value)
self.logicaladdress = eHdmiCEC.getInstance().getLogicalAddress()
self.saveVolUp = None
self.saveVolDown = None
self.saveVolMute = None
self.volumeForwardingDestination = 0
config.hdmicec.volume_forwarding.addNotifier(self.configVolumeForwarding)
config.hdmicec.enabled.addNotifier(self.configVolumeForwarding)
uptime = float(open("/proc/uptime", "r").read().split()[0])
if config.hdmicec.enabled.value and config.hdmicec.handle_deepstandby_events.value and uptime < 120:
filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
doc = xml.etree.cElementTree.parse(filename)
except:
doc = None
if doc:
root = doc.getroot()
for timer in root.findall("timer"):
begin = int(timer.get("begin"))
disabled = long(timer.get("disabled") or "0")
justplay = long(timer.get("justplay") or "0")
always_zap = long(timer.get("always_zap") or "0")
if begin < time() or begin > time() + 360 or disabled or justplay or always_zap:
continue
if Standby.inStandby is None:
Notifications.AddNotification(Standby.Standby)
return
self.onLeaveStandby()
def getPhysicalAddress(self):
physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress()
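# the 16-bit CEC physical address is rendered as four dotted nibbles,
# e.g. 0x1000 -> "1.0.0.0" (example value for illustration)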
hexstring = '%04x' % physicaladdress
return hexstring[0] + '.' + hexstring[1] + '.' + hexstring[2] + '.' + hexstring[3]
def setFixedPhysicalAddress(self, address):
if address != config.hdmicec.fixed_physical_address.value:
config.hdmicec.fixed_physical_address.value = address
config.hdmicec.fixed_physical_address.save()
hexstring = address[0] + address[2] + address[4] + address[6]
eHdmiCEC.getInstance().setFixedPhysicalAddress(int(float.fromhex(hexstring)))
def sendMessage(self, address, message):
cmd = 0
data = ''
if message == "wakeup":
cmd = 0x04
elif message == "sourceactive":
address = self.logicaladdress * 0x10 + 0x0f # use broadcast for active source command
cmd = 0x82
physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress()
data = str(struct.pack('BB', int(physicaladdress/256), int(physicaladdress%256)))
elif message == "standby":
cmd = 0x36
elif message == "sourceinactive":
physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress()
cmd = 0x9d
data = str(struct.pack('BB', int(physicaladdress/256), int(physicaladdress%256)))
elif message == "menuactive":
cmd = 0x8e
data = str(struct.pack('B', 0x00))
elif message == "menuinactive":
cmd = 0x8e
data = str(struct.pack('B', 0x01))
elif message == "givesystemaudiostatus":
cmd = 0x7d
address = self.logicaladdress * 0x10 + 0x05
elif message == "setsystemaudiomode":
cmd = 0x70
address = self.logicaladdress * 0x10 + 0x05
physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress()
data = str(struct.pack('BB', int(physicaladdress/256), int(physicaladdress%256)))
elif message == "osdname":
address = self.logicaladdress * 0x10
cmd = 0x47
data = "Enigma2"
elif message == "poweractive":
address = self.logicaladdress * 0x10
cmd = 0x90
data = str(struct.pack('B', 0x00))
elif message == "powerinactive":
address = self.logicaladdress * 0x10
cmd = 0x90
data = str(struct.pack('B', 0x01))
elif message == "poweron":
address = self.logicaladdress * 0x10
cmd = 0x90
data = str(struct.pack('B', 0x02))
elif message == "reportaddress":
address = self.logicaladdress * 0x10 + 0x0f # use broadcast address
cmd = 0x84
physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress()
devicetype = eHdmiCEC.getInstance().getDeviceType()
data = str(struct.pack('BBB', int(physicaladdress/256), int(physicaladdress%256), devicetype))
elif message == "vendorid":
address = self.logicaladdress * 0x10 + 0x0f
cmd = 0x87
data = '\x00\xE0\x91'
elif message == "keypoweron":
cmd = 0x44
data = str(struct.pack('B', 0x6d))
elif message == "keypoweroff":
cmd = 0x44
data = str(struct.pack('B', 0x6c))
elif message == "playstatus":
address = self.logicaladdress * 0x10
cmd = 0x1B
data = '\x20'
elif message == "vendorcommand0":
address = self.logicaladdress * 0x10
cmd = 0x89
data = '\x02\x05'
elif message == "vendorcommand1":
address = self.logicaladdress * 0x10
cmd = 0x89
data = '\xA1\x01'
elif message == "vendorcommand2":
address = self.logicaladdress * 0x10
cmd = 0x89
data = '\x0C\x05'
elif message == "vendorcommand3":
address = self.logicaladdress * 0x10
cmd = 0x89
data = '\x05\x04'
if cmd:
if config.hdmicec.minimum_send_interval.value != "0":
self.queue.append((address, cmd, data))
if not self.wait.isActive():
self.wait.start(int(config.hdmicec.minimum_send_interval.value), True)
else:
eHdmiCEC.getInstance().sendMessage(address, cmd, data, len(data))
def sendCmd(self):
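# Note (added for clarity): this drains one queued CEC message per timer tick
# and re-arms the timer while messages remain, so sends are spaced by at least
# minimum_send_interval milliseconds.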
if len(self.queue):
(address, cmd, data) = self.queue.pop(0)
eHdmiCEC.getInstance().sendMessage(address, cmd, data, len(data))
self.wait.start(int(config.hdmicec.minimum_send_interval.value), True)
def sendMessages(self, address, messages):
for message in messages:
self.sendMessage(address, message)
def wakeupMessages(self):
if config.hdmicec.enabled.value:
messages = []
if config.hdmicec.control_tv_wakeup.value:
messages.append("wakeup")
if config.hdmicec.report_active_source.value:
messages.append("sourceactive")
if config.hdmicec.report_active_menu.value:
messages.append("menuactive")
if messages:
self.sendMessages(0, messages)
if config.hdmicec.control_receiver_wakeup.value:
self.sendMessage(5, "keypoweron")
self.sendMessage(5, "setsystemaudiomode")
def standbyMessages(self):
if config.hdmicec.enabled.value:
messages = []
if config.hdmicec.control_tv_standby.value:
messages.append("standby")
else:
if config.hdmicec.report_active_source.value:
messages.append("sourceinactive")
if config.hdmicec.report_active_menu.value:
messages.append("menuinactive")
if messages:
self.sendMessages(0, messages)
if config.hdmicec.control_receiver_standby.value:
self.sendMessage(5, "keypoweroff")
self.sendMessage(5, "standby")
def onLeaveStandby(self):
self.wakeupMessages()
if int(config.hdmicec.repeat_wakeup_timer.value):
self.repeat.startLongTimer(int(config.hdmicec.repeat_wakeup_timer.value))
def onEnterStandby(self, configElement):
from Screens.Standby import inStandby
inStandby.onClose.append(self.onLeaveStandby)
self.repeat.stop()
self.standbyMessages()
def onEnterDeepStandby(self, configElement):
if config.hdmicec.handle_deepstandby_events.value:
self.standbyMessages()
def standby(self):
from Screens.Standby import Standby, inStandby
if not inStandby:
from Tools import Notifications
Notifications.AddNotification(Standby)
def wakeup(self):
from Screens.Standby import inStandby
if inStandby:
inStandby.Power()
def messageReceived(self, message):
if config.hdmicec.enabled.value:
from Screens.Standby import inStandby
cmd = message.getCommand()
data = 16 * '\x00'
length = message.getData(data, len(data))
if cmd == 0x00: # feature abort
if data[0] == '\x44':
print 'eHdmiCec: volume forwarding not supported by device %02x'%(message.getAddress())
self.volumeForwardingDisable()
elif cmd == 0x89:
if data[0] == '\x01':
self.sendMessage(message.getAddress(), 'vendorcommand0')
if data[0] == '\xA0':
if inStandby:
self.sendMessage(message.getAddress(), 'poweron')
else:
self.sendMessage(message.getAddress(), 'poweractive')
if data[0] == '\x0B':
self.sendMessage(message.getAddress(), 'vendorcommand2')
if data[0] == '\x04':
self.sendMessage(message.getAddress(), 'vendorcommand3')
elif cmd == 0x1A: # request name
self.sendMessage(message.getAddress(), 'playstatus')
elif cmd == 0x46: # request name
self.sendMessage(message.getAddress(), 'osdname')
elif cmd == 0x7e or cmd == 0x72: # system audio mode status
if data[0] == '\x01':
self.volumeForwardingDestination = 5 # on: send volume keys to receiver
else:
self.volumeForwardingDestination = 0 # off: send volume keys to tv
if config.hdmicec.volume_forwarding.value:
print 'eHdmiCec: volume forwarding to device %02x enabled'%(self.volumeForwardingDestination)
self.volumeForwardingEnable()
elif cmd == 0x8f: # request power status
if inStandby:
self.sendMessage(message.getAddress(), 'powerinactive')
else:
self.sendMessage(message.getAddress(), 'poweractive')
elif cmd == 0x83: # request address
self.sendMessage(message.getAddress(), 'reportaddress')
elif cmd == 0x86: # request streaming path
physicaladdress = ord(data[0]) * 256 + ord(data[1])
ouraddress = eHdmiCEC.getInstance().getPhysicalAddress()
if physicaladdress == ouraddress:
if not inStandby:
if config.hdmicec.report_active_source.value:
self.sendMessage(message.getAddress(), 'sourceactive')
elif cmd == 0x85: # request active source
if not inStandby:
if config.hdmicec.report_active_source.value:
self.sendMessage(message.getAddress(), 'sourceactive')
elif cmd == 0x8c: # request vendor id
self.sendMessage(message.getAddress(), 'vendorid')
elif cmd == 0x8d: # menu request
requesttype = ord(data[0])
if requesttype == 2: # query
if inStandby:
self.sendMessage(message.getAddress(), 'menuinactive')
else:
self.sendMessage(message.getAddress(), 'menuactive')
# handle standby request from the tv
if cmd == 0x36 and config.hdmicec.handle_tv_standby.value:
self.standby()
# handle wakeup requests from the tv
if config.hdmicec.handle_tv_wakeup.value:
if cmd == 0x04 and config.hdmicec.tv_wakeup_detection.value == "wakeup":
self.wakeup()
elif cmd == 0x84 and config.hdmicec.tv_wakeup_detection.value == "tvreportphysicaladdress":
if (ord(data[0]) * 256 + ord(data[1])) == 0 and ord(data[2]) == 0:
self.wakeup()
elif cmd == 0x85 and config.hdmicec.tv_wakeup_detection.value == "sourcerequest":
self.wakeup()
elif cmd == 0x86 and config.hdmicec.tv_wakeup_detection.value == "streamrequest":
physicaladdress = ord(data[0]) * 256 + ord(data[1])
ouraddress = eHdmiCEC.getInstance().getPhysicalAddress()
if physicaladdress == ouraddress:
self.wakeup()
elif cmd == 0x46 and config.hdmicec.tv_wakeup_detection.value == "osdnamerequest":
self.wakeup()
elif cmd == 0x87 and (ord(data[0])==0x00 and ord(data[1])==0xE0 and ord(data[2])==0x91) and config.hdmicec.tv_wakeup_detection.value == "vendorid":
self.wakeup()
elif cmd != 0x36 and config.hdmicec.tv_wakeup_detection.value == "activity":
self.wakeup()
def configVolumeForwarding(self, configElement):
if config.hdmicec.enabled.value and config.hdmicec.volume_forwarding.value:
self.volumeForwardingEnable()
self.sendMessage(0x05, 'givesystemaudiostatus')
else:
self.volumeForwardingDisable()
def volumeForwardingEnable(self):
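# Note (added for clarity): this monkey-patches the VolumeControl methods so
# volume keys are forwarded over CEC instead of changing local volume; the
# originals are saved so volumeForwardingDisable() can restore them.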
if self.saveVolMute is None:
self.saveVolUp = VolumeControl.volUp
self.saveVolDown = VolumeControl.volDown
self.saveVolMute = VolumeControl.volMute
VolumeControl.volUp = self.volUp
VolumeControl.volDown = self.volDown
VolumeControl.volMute = self.volMute
def volumeForwardingDisable(self):
if self.saveVolMute is not None:
VolumeControl.volUp = self.saveVolUp
VolumeControl.volDown = self.saveVolDown
VolumeControl.volMute = self.saveVolMute
self.saveVolUp = None
self.saveVolDown = None
self.saveVolMute = None
def volUp(self):
cmd = 0x44
data = str(struct.pack('B', 0x41))
eHdmiCEC.getInstance().sendMessage(self.logicaladdress * 0x10 + self.volumeForwardingDestination, cmd, data, len(data))
def volDown(self):
cmd = 0x44
data = str(struct.pack('B', 0x42))
eHdmiCEC.getInstance().sendMessage(self.logicaladdress * 0x10 + self.volumeForwardingDestination, cmd, data, len(data))
def volMute(self):
cmd = 0x44
data = str(struct.pack('B', 0x43))
eHdmiCEC.getInstance().sendMessage(self.logicaladdress * 0x10 + self.volumeForwardingDestination, cmd, data, len(data))
hdmi_cec = HdmiCec()
|
gpl-2.0
|
mercycorps/TolaActivity
|
workflow/migrations/0026_auto_20190116_1357.py
|
1
|
1071
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-16 21:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflow', '0025_merge_20190116_1357'),
]
operations = [
migrations.AddField(
model_name='tolauser',
name='mode_of_address',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='tolauser',
name='mode_of_contact',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='tolauser',
name='phone_number',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='program',
name='user_access',
field=models.ManyToManyField(blank=True, related_name='program_access', to='workflow.TolaUser'),
),
]
|
apache-2.0
|
heldergg/dre
|
lib/djapian/database.py
|
5
|
1773
|
import os
import xapian
from djapian.utils.decorators import reopen_if_modified
class Database(object):
def __init__(self, path):
self._path = path
def open(self, write=False):
"""
Open the database; pass write=True to get a writable handle.
"""
if not os.path.exists(self._path):
os.makedirs(self._path)
if write:
database = xapian.WritableDatabase(
self._path,
xapian.DB_CREATE_OR_OPEN,
)
else:
try:
database = xapian.Database(self._path)
except xapian.DatabaseOpeningError:
self.create_database()
database = xapian.Database(self._path)
return database
def create_database(self):
database = xapian.WritableDatabase(
self._path,
xapian.DB_CREATE_OR_OPEN,
)
del database
def document_count(self):
database = self.open()
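# reopen_if_modified is assumed to retry the read with a freshly reopened
# handle if another writer modified the index underneath us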
return reopen_if_modified(database)(lambda: database.get_doccount())()
def clear(self):
try:
for file_path in os.listdir(self._path):
os.remove(os.path.join(self._path, file_path))
os.rmdir(self._path)
except OSError:
pass
class CompositeDatabase(Database):
def __init__(self, dbs):
self._dbs = dbs
def open(self, write=False):
if write:
raise ValueError("Composite database cannot be opened for writing")
base = self._dbs[0]
raw = base.open()
for db in self._dbs[1:]:
raw.add_database(db.open())
return raw
def create_database(self):
raise NotImplementedError
def clear(self):
raise NotImplementedError
|
gpl-3.0
|
mrjacobagilbert/gnuradio
|
gr-utils/modtool/core/info.py
|
6
|
5693
|
#
# Copyright 2013, 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" Returns information about a module """
import os
from ..tools import get_modname
from .base import ModTool, ModToolException
class ModToolInfo(ModTool):
""" Return information about a given module """
name = 'info'
description = 'Return information about a given module.'
def __init__(self, python_readable=False, suggested_dirs=None, **kwargs):
ModTool.__init__(self, **kwargs)
# Don't call ModTool._validate(), it is too chatty!
self._directory = self.dir
self._python_readable = python_readable
self._suggested_dirs = suggested_dirs
def run(self):
""" Go, go, go! """
mod_info = dict()
mod_info['base_dir'] = self._get_base_dir(self._directory)
if mod_info['base_dir'] is None:
raise ModToolException('{}' if self._python_readable else "No module found.")
os.chdir(mod_info['base_dir'])
mod_info['modname'] = get_modname()
if mod_info['modname'] is None:
raise ModToolException('{}' if self._python_readable else "No module found.")
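# Heuristic version detection (as implemented below): a per-module include
# directory implies the 3.7 tree layout, and the absence of
# cmake/Modules/FindCppUnit.cmake bumps the guess further to 3.8.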
if self.info['version'] == '36' and (
os.path.isdir(os.path.join('include', mod_info['modname'])) or
os.path.isdir(os.path.join('include', 'gnuradio', mod_info['modname']))
):
self.info['version'] = '37'
if not os.path.isfile(os.path.join('cmake', 'Modules', 'FindCppUnit.cmake')):
self.info['version'] = '38'
mod_info['version'] = self.info['version']
if 'is_component' in list(self.info.keys()) and self.info['is_component']:
mod_info['is_component'] = True
mod_info['incdirs'] = []
mod_incl_dir = os.path.join(mod_info['base_dir'], 'include')
if os.path.isdir(os.path.join(mod_incl_dir, mod_info['modname'])):
mod_info['incdirs'].append(os.path.join(mod_incl_dir, mod_info['modname']))
else:
mod_info['incdirs'].append(mod_incl_dir)
build_dir = self._get_build_dir(mod_info)
if build_dir is not None:
mod_info['build_dir'] = build_dir
mod_info['incdirs'] += self._get_include_dirs(mod_info)
if self._python_readable:
print(str(mod_info))
else:
self._pretty_print(mod_info)
def _get_base_dir(self, start_dir):
""" Figure out the base dir (where the top-level cmake file is) """
base_dir = os.path.abspath(start_dir)
if self._check_directory(base_dir):
return base_dir
else:
(up_dir, this_dir) = os.path.split(base_dir)
if os.path.split(up_dir)[1] == 'include':
up_dir = os.path.split(up_dir)[0]
if self._check_directory(up_dir):
return up_dir
return None
def _get_build_dir(self, mod_info):
""" Figure out the build dir (i.e. where you run 'cmake'). This checks
for a file called CMakeCache.txt, which is created when running cmake.
If that hasn't happened, the build dir cannot be detected, unless it's
called 'build', which is then assumed to be the build dir. """
base_build_dir = mod_info['base_dir']
if 'is_component' in list(mod_info.keys()):
(base_build_dir, rest_dir) = os.path.split(base_build_dir)
has_build_dir = os.path.isdir(os.path.join(base_build_dir, 'build'))
if (has_build_dir and os.path.isfile(os.path.join(base_build_dir, 'CMakeCache.txt'))):
return os.path.join(base_build_dir, 'build')
else:
for (dirpath, dirnames, filenames) in os.walk(base_build_dir):
if 'CMakeCache.txt' in filenames:
return dirpath
if has_build_dir:
return os.path.join(base_build_dir, 'build')
return None
def _get_include_dirs(self, mod_info):
""" Figure out include dirs for the make process. """
inc_dirs = []
path_or_internal = {True: 'INTERNAL',
False: 'PATH'}['is_component' in list(mod_info.keys())]
try:
cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt'))
for line in cmakecache_fid:
if line.find(f'GNURADIO_RUNTIME_INCLUDE_DIRS:{path_or_internal}') != -1:
inc_dirs += line.replace(f'GNURADIO_RUNTIME_INCLUDE_DIRS:{path_or_internal}=', '').strip().split(';')
except IOError:
pass
if not inc_dirs and self._suggested_dirs is not None:
inc_dirs = [os.path.normpath(path) for path in self._suggested_dirs.split(':') if os.path.isdir(path)]
return inc_dirs
def _pretty_print(self, mod_info):
""" Output the module info in human-readable format """
index_names = {'base_dir': 'Base directory',
'modname': 'Module name',
'is_component': 'Is GR component',
'build_dir': 'Build directory',
'incdirs': 'Include directories'}
for key in list(mod_info.keys()):
if key == 'version':
version = {
'36': 'pre-3.7',
'37': 'post-3.7',
'38': 'post-3.8',
'autofoo': 'Autotools (pre-3.5)'
}[mod_info['version']]
print(f" API version: {version}")
else:
print('%19s: %s' % (index_names[key], mod_info[key]))
|
gpl-3.0
|
shrimpboyho/git.js
|
emscript/python/2.7.5.1_32bit/Lib/site-packages/win32/test/test_pywintypes.py
|
15
|
3756
|
import sys
import unittest
import pywintypes
import time
from pywin32_testutil import str2bytes, ob2memory
import datetime
import operator
class TestCase(unittest.TestCase):
def testPyTimeFormat(self):
struct_current = time.localtime()
pytime_current = pywintypes.Time(struct_current)
# try and test all the standard parts of the format
# Note we used to include '%Z' testing, but that was pretty useless as
# it always returned the local timezone.
format_strings = "%a %A %b %B %c %d %H %I %j %m %M %p %S %U %w %W %x %X %y %Y"
for fmt in format_strings.split():
v1 = pytime_current.Format(fmt)
v2 = time.strftime(fmt, struct_current)
self.assertEquals(v1, v2, "format %s failed - %r != %r" % (fmt, v1, v2))
def testPyTimePrint(self):
# This used to crash with an invalid, or too early time.
# We don't really want to check that it does cause a ValueError
# (as hopefully this won't be true forever). So either working, or
# ValueError is OK.
try:
t = pywintypes.Time(-2)
t.Format()
except ValueError:
return
def testTimeInDict(self):
d = {}
d['t1'] = pywintypes.Time(1)
self.failUnlessEqual(d['t1'], pywintypes.Time(1))
def testPyTimeCompare(self):
t1 = pywintypes.Time(100)
t1_2 = pywintypes.Time(100)
t2 = pywintypes.Time(101)
self.failUnlessEqual(t1, t1_2)
self.failUnless(t1 <= t1_2)
self.failUnless(t1_2 >= t1)
self.failIfEqual(t1, t2)
self.failUnless(t1 < t2)
self.failUnless(t2 > t1 )
def testPyTimeCompareOther(self):
t1 = pywintypes.Time(100)
t2 = None
self.failIfEqual(t1, t2)
def testTimeTuple(self):
now = datetime.datetime.now() # has usec...
# timetuple() lost usec - pt must be <=...
pt = pywintypes.Time(now.timetuple())
# *sob* - only if we have a datetime object can we compare like this.
if isinstance(pt, datetime.datetime):
self.failUnless(pt <= now)
def testTimeTuplems(self):
now = datetime.datetime.now() # has usec...
tt = now.timetuple() + (now.microsecond // 1000,)
pt = pywintypes.Time(tt)
# we can't compare if using the old type, as it loses all sub-second res.
if isinstance(pt, datetime.datetime):
self.failUnlessEqual(now, pt)
def testPyTimeFromTime(self):
t1 = pywintypes.Time(time.time())
self.failUnless(pywintypes.Time(t1) is t1)
def testGUID(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
iid2 = pywintypes.IID(ob2memory(iid), True)
self.assertEquals(iid, iid2)
self.assertRaises(ValueError, pywintypes.IID, str2bytes('00'), True) # too short
self.assertRaises(TypeError, pywintypes.IID, 0, True) # no buffer
def testGUIDRichCmp(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
self.failIf(s==None)
self.failIf(None==s)
self.failUnless(s!=None)
self.failUnless(None!=s)
if sys.version_info > (3,0):
self.assertRaises(TypeError, operator.gt, None, s)
self.assertRaises(TypeError, operator.gt, s, None)
self.assertRaises(TypeError, operator.lt, None, s)
self.assertRaises(TypeError, operator.lt, s, None)
def testGUIDInDict(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
d = dict(item=iid)
self.failUnlessEqual(d['item'], iid)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
bwrsandman/OpenUpgrade
|
addons/l10n_in_hr_payroll/wizard/__init__.py
|
430
|
1110
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_salary_employee_bymonth
import hr_yearly_salary_detail
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
zhuyue1314/CodeXt-ugly
|
LoadingBinaries/simplessl.py
|
2
|
2342
|
#!/usr/bin/env python
#https://docs.python.org/dev/library/ssl.html
import socket, ssl
def server():
bindsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bindsocket.bind(('127.0.0.1', 10000))
bindsocket.listen(5)
print "Listening for a connection"
tls_serv = ssl.wrap_socket(bindsocket, server_side=True, keyfile='./my.key', certfile="./my.crt", ssl_version=ssl.PROTOCOL_TLSv1)
connstream, fromaddr = tls_serv.accept()
#connstream = ssl.wrap_socket(newsocket, server_side=True, certfile="my.crt", ssl_version=ssl.PROTOCOL_TLSv1)
print "Connected"
try:
deal_with_client(connstream)
finally:
connstream.shutdown(socket.SHUT_RDWR)
connstream.close()
bindsocket.shutdown(socket.SHUT_RDWR)
bindsocket.close()
def deal_with_client(s):
data = "ddfd"
# empty data means the client is finished with us
while len (data) > 0:
print "Ready to recv"
data = s.recv(1024)
print "Data: " + data
def client():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_sock = ssl.wrap_socket(sock, cert_reqs = ssl.CERT_NONE, ssl_version = ssl.PROTOCOL_TLSv1)
ssl_sock.connect(('127.0.0.1', 10000))
ssl_sock.send ("test message")
ssl_sock.shutdown(socket.SHUT_RDWR)
ssl_sock.close()
import sys
if len (sys.argv) == 1 or (len (sys.argv) == 2 and sys.argv[1][0] == 's'):
server ()
else:
client ()
exit
"""
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout my.key -out my.crt
Generating a 2048 bit RSA private key
..........................+++
............................................+++
writing new private key to 'my.key'
-----
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [AU]:US
State or Province Name (full name) [Some-State]:VA
Locality Name (eg, city) []:Fairfax
Organization Name (eg, company) [Internet Widgits Pty Ltd]:GMU
Organizational Unit Name (eg, section) []:NSSL
Common Name (eg, YOUR name) []:Farley
Email Address []:
"""
|
gpl-2.0
|
bbbenja/SickRage
|
lib/imdb/linguistics.py
|
76
|
9353
|
"""
linguistics module (imdb package).
This module provides functions and data to handle in a smart way
languages and articles (in various languages) at the beginning of movie titles.
Copyright 2009-2012 Davide Alberani <[email protected]>
2012 Alberto Malagoli <albemala AT gmail.com>
2009 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non-article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the count of times they are used in one way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
"l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
'\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# at [email protected]; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
'English': ('the', 'a', 'an'),
'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
'uno'),
'Spanish': ('la', 'lo', 'el', 'las', 'un', 'los', 'una', 'al', 'del',
'unos', 'unas', 'uno'),
'French': ('le', "l'", 'la', 'les', 'un', 'une', 'des', 'au', 'du', '\xc3\xa0 la',
'de la', 'aux'),
'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
'Turkish': (), # Some languages don't have articles.
}
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# [email protected] .
LANG_COUNTRIES = {
'English': ('Canada', 'Swaziland', 'Ghana', 'St. Lucia', 'Liberia', 'Jamaica', 'Bahamas', 'New Zealand', 'Lesotho', 'Kenya', 'Solomon Islands', 'United States', 'South Africa', 'St. Vincent and the Grenadines', 'Fiji', 'UK', 'Nigeria', 'Australia', 'USA', 'St. Kitts and Nevis', 'Belize', 'Sierra Leone', 'Gambia', 'Namibia', 'Micronesia', 'Kiribati', 'Grenada', 'Antigua and Barbuda', 'Barbados', 'Malta', 'Zimbabwe', 'Ireland', 'Uganda', 'Trinidad and Tobago', 'South Sudan', 'Guyana', 'Botswana', 'United Kingdom', 'Zambia'),
'Italian': ('Italy', 'San Marino', 'Vatican City'),
'Spanish': ('Spain', 'Mexico', 'Argentina', 'Bolivia', 'Guatemala', 'Uruguay', 'Peru', 'Cuba', 'Dominican Republic', 'Panama', 'Costa Rica', 'Ecuador', 'El Salvador', 'Chile', 'Equatorial Guinea', 'Spain', 'Colombia', 'Nicaragua', 'Venezuela', 'Honduras', 'Paraguay'),
'French': ('Cameroon', 'Burkina Faso', 'Dominica', 'Gabon', 'Monaco', 'France', "Cote d'Ivoire", 'Benin', 'Togo', 'Central African Republic', 'Mali', 'Niger', 'Congo, Republic of', 'Guinea', 'Congo, Democratic Republic of the', 'Luxembourg', 'Haiti', 'Chad', 'Burundi', 'Madagascar', 'Comoros', 'Senegal'),
'Portuguese': ('Portugal', 'Brazil', 'Sao Tome and Principe', 'Cape Verde', 'Angola', 'Mozambique', 'Guinea-Bissau'),
'German': ('Liechtenstein', 'Austria', 'West Germany', 'Switzerland', 'East Germany', 'Germany'),
'Arabic': ('Saudi Arabia', 'Kuwait', 'Jordan', 'Oman', 'Yemen', 'United Arab Emirates', 'Mauritania', 'Lebanon', 'Bahrain', 'Libya', 'Palestinian State (proposed)', 'Qatar', 'Algeria', 'Morocco', 'Iraq', 'Egypt', 'Djibouti', 'Sudan', 'Syria', 'Tunisia'),
'Turkish': ('Turkey', 'Azerbaijan'),
'Swahili': ('Tanzania',),
'Swedish': ('Sweden',),
'Icelandic': ('Iceland',),
'Estonian': ('Estonia',),
'Romanian': ('Romania',),
'Samoan': ('Samoa',),
'Slovenian': ('Slovenia',),
'Tok Pisin': ('Papua New Guinea',),
'Palauan': ('Palau',),
'Macedonian': ('Macedonia',),
'Hindi': ('India',),
'Dutch': ('Netherlands', 'Belgium', 'Suriname'),
'Marshallese': ('Marshall Islands',),
'Korean': ('Korea, North', 'Korea, South', 'North Korea', 'South Korea'),
'Vietnamese': ('Vietnam',),
'Danish': ('Denmark',),
'Khmer': ('Cambodia',),
'Lao': ('Laos',),
'Somali': ('Somalia',),
'Filipino': ('Philippines',),
'Hungarian': ('Hungary',),
'Ukrainian': ('Ukraine',),
'Bosnian': ('Bosnia and Herzegovina',),
'Georgian': ('Georgia',),
'Lithuanian': ('Lithuania',),
'Malay': ('Brunei',),
'Tetum': ('East Timor',),
'Norwegian': ('Norway',),
'Armenian': ('Armenia',),
'Russian': ('Russia',),
'Slovak': ('Slovakia',),
'Thai': ('Thailand',),
'Croatian': ('Croatia',),
'Turkmen': ('Turkmenistan',),
'Nepali': ('Nepal',),
'Finnish': ('Finland',),
'Uzbek': ('Uzbekistan',),
'Albanian': ('Albania', 'Kosovo'),
'Hebrew': ('Israel',),
'Bulgarian': ('Bulgaria',),
'Greek': ('Cyprus', 'Greece'),
'Burmese': ('Myanmar',),
'Latvian': ('Latvia',),
'Serbian': ('Serbia',),
'Afar': ('Eritrea',),
'Catalan': ('Andorra',),
'Chinese': ('China', 'Taiwan'),
'Czech': ('Czech Republic', 'Czechoslovakia'),
'Bislama': ('Vanuatu',),
'Japanese': ('Japan',),
'Kinyarwanda': ('Rwanda',),
'Amharic': ('Ethiopia',),
'Persian': ('Afghanistan', 'Iran'),
'Tajik': ('Tajikistan',),
'Mongolian': ('Mongolia',),
'Dzongkha': ('Bhutan',),
'Urdu': ('Pakistan',),
'Polish': ('Poland',),
'Sinhala': ('Sri Lanka',),
}
# Maps countries to their main language.
COUNTRY_LANG = {}
for lang in LANG_COUNTRIES:
for country in LANG_COUNTRIES[lang]:
COUNTRY_LANG[country] = lang
def toUnicode(articles):
"""Convert a list of articles utf-8 encoded to unicode strings."""
return tuple([art.decode('utf_8') for art in articles])
def toDicts(articles):
"""Given a list of utf-8 encoded articles, build two dictionary (one
utf-8 encoded and another one with unicode keys) for faster matches."""
uArticles = toUnicode(articles)
return dict([(x, x) for x in articles]), dict([(x, x) for x in uArticles])
def addTrailingSpace(articles):
"""From the given list of utf-8 encoded articles, return two
lists (one utf-8 encoded and another one in unicode) where a space
is added at the end - if the last char is not ' or -."""
_spArticles = []
_spUnicodeArticles = []
for article in articles:
if article[-1] not in ("'", '-'):
article += ' '
_spArticles.append(article)
_spUnicodeArticles.append(article.decode('utf_8'))
return _spArticles, _spUnicodeArticles
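# Worked example (illustrative) of the helper above:
#   addTrailingSpace(('the', "l'")) -> (['the ', "l'"], [u'the ', u"l'"])
# 'the' gains a trailing space, while "l'" is left untouched because it
# ends with an apostrophe.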
# Caches.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
"""Return dictionaries of articles specific for the given language, or the
default one if the language is not known."""
if lang in _ART_CACHE:
return _ART_CACHE[lang]
artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_ART_CACHE[lang] = artDicts
return artDicts
def spArticlesForLang(lang):
"""Return lists of articles (plus optional spaces) specific for the
given language, or the default one if the language is not known."""
if lang in _SP_ART_CACHE:
return _SP_ART_CACHE[lang]
spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_SP_ART_CACHE[lang] = spArticles
return spArticles
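# Minimal usage sketch (hypothetical caller, not part of this module):
#
#   def strip_leading_article(title, lang=None):
#       _, sp_unicode_articles = spArticlesForLang(lang)
#       for art in sp_unicode_articles:
#           if title.lower().startswith(art):
#               return title[len(art):]
#       return title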
|
gpl-3.0
|
huzq/scikit-learn
|
sklearn/impute/tests/test_knn.py
|
15
|
17366
|
import numpy as np
import pytest
from sklearn import config_context
from sklearn.impute import KNNImputer
from sklearn.metrics.pairwise import nan_euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils._testing import assert_allclose
@pytest.mark.parametrize("weights", ["uniform", "distance"])
@pytest.mark.parametrize("n_neighbors", range(1, 6))
def test_knn_imputer_shape(weights, n_neighbors):
# Verify the shapes of the imputed matrix for different weights and
# number of neighbors.
n_rows = 10
n_cols = 2
X = np.random.rand(n_rows, n_cols)
X[0, 0] = np.nan
imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
X_imputed = imputer.fit_transform(X)
assert X_imputed.shape == (n_rows, n_cols)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_default_with_invalid_input(na):
# Test imputation with default values and invalid input
# Test with inf present
X = np.array([
[np.inf, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
KNNImputer(missing_values=na).fit(X)
# Test with inf present in matrix passed in transform()
X = np.array([
[np.inf, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
X_fit = np.array([
[0, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
imputer = KNNImputer(missing_values=na).fit(X_fit)
with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
imputer.transform(X)
# negative n_neighbors
with pytest.raises(ValueError, match="Expected n_neighbors > 0"):
KNNImputer(missing_values=na, n_neighbors=0).fit(X_fit)
# Test with missing_values=0 when NaN present
imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
msg = (r"Input contains NaN, infinity or a value too large for "
r"dtype\('float64'\)")
with pytest.raises(ValueError, match=msg):
imputer.fit(X)
X = np.array([
[0, 0],
[np.nan, 2],
])
# Test with a metric type without NaN support
imputer = KNNImputer(metric="euclidean")
bad_metric_msg = "The selected metric does not support NaN values"
with pytest.raises(ValueError, match=bad_metric_msg):
imputer.fit(X)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_removes_all_na_features(na):
X = np.array([
[1, 1, na, 1, 1, 1.],
[2, 3, na, 2, 2, 2],
[3, 4, na, 3, 3, na],
[6, 4, na, na, 6, 6],
])
knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
X_transform = knn.transform(X)
assert not np.isnan(X_transform).any()
assert X_transform.shape == (4, 5)
X_test = np.arange(0, 12).reshape(2, 6)
X_transform = knn.transform(X_test)
assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_zero_nan_imputes_the_same(na):
# Test with an imputable matrix and compare with different missing_values
X_zero = np.array([
[1, 0, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 0],
[6, 6, 0, 6, 6],
])
X_nan = np.array([
[1, na, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, na],
[6, 6, na, 6, 6],
])
X_imputed = np.array([
[1, 2.5, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 1.5],
[6, 6, 2.5, 6, 6],
])
imputer_zero = KNNImputer(missing_values=0, n_neighbors=2,
weights="uniform")
imputer_nan = KNNImputer(missing_values=na, n_neighbors=2,
weights="uniform")
assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
assert_allclose(imputer_zero.fit_transform(X_zero),
imputer_nan.fit_transform(X_nan))
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_verify(na):
# Test with an imputable matrix
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, na],
[3, 2, 3, na],
[na, 4, 5, 5],
[6, na, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
X_imputed = np.array([
[1, 0, 0, 1],
[2, 1, 2, 8],
[3, 2, 3, 8],
[4, 4, 5, 5],
[6, 3, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
    # Test when there are not enough neighbors
X = np.array([
[1, 0, 0, na],
[2, 1, 2, na],
[3, 2, 3, na],
[4, 4, 5, na],
[6, 7, 6, na],
[8, 8, 8, na],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
# Not enough neighbors, use column mean from training
X_impute_value = (20 + 22) / 2
X_imputed = np.array([
[1, 0, 0, X_impute_value],
[2, 1, 2, X_impute_value],
[3, 2, 3, X_impute_value],
[4, 4, 5, X_impute_value],
[6, 7, 6, X_impute_value],
[8, 8, 8, X_impute_value],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
# Test when data in fit() and transform() are different
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 16]
])
X1 = np.array([
[1, 0],
[3, 2],
[4, na]
])
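    # Expected value for X1[2, 1]: with the default n_neighbors=5 the donors
    # are training rows 0, 2, 3, 4 and 5 (row 1 shares no observed feature
    # with [4, na] and row 6 is the farthest), so their column-1 values
    # 0, 3, 6, 7 and 8 are averaged.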
X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
X1_imputed = np.array([
[1, 0],
[3, 2],
[4, X_2_1]
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_one_n_neighbors(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, na],
[7, 7],
[na, 8],
[14, 13]
])
X_imputed = np.array([
[0, 0],
[4, 2],
[4, 3],
[5, 3],
[7, 7],
[7, 8],
[14, 13]
])
imputer = KNNImputer(n_neighbors=1, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_all_samples_are_neighbors(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, na],
[7, 7],
[na, 8],
[14, 13]
])
X_imputed = np.array([
[0, 0],
[6, 2],
[4, 3],
[5, 5.5],
[7, 7],
[6, 8],
[14, 13]
])
n_neighbors = X.shape[0] - 1
imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
n_neighbors = X.shape[0]
imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_uniform(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "uniform" weight (or unweighted)
X_imputed_uniform = np.array([
[0, 0],
[5, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="uniform", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
# Test with "callable" weight
def no_weight(dist):
return None
imputer = KNNImputer(weights=no_weight, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
# Test with "callable" uniform weight
def uniform_weight(dist):
return np.ones_like(dist)
imputer = KNNImputer(weights=uniform_weight, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_distance(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "distance" weight
nn = KNeighborsRegressor(metric="euclidean", weights="distance")
X_rows_idx = [0, 2, 3, 4, 5, 6]
nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
knn_imputed_value = nn.predict(X[1:2, 1:])[0]
# Manual calculation
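    # (rows 0, 2, 3, 4 and 5 are row 1's five nearest donors under the
    # default n_neighbors=5; row 6 is the farthest and row 1 is the receiver)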
X_neighbors_idx = [0, 2, 3, 4, 5]
dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
weights = 1 / dist[:, X_neighbors_idx].ravel()
manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)
X_imputed_distance1 = np.array([
[0, 0],
[manual_imputed_value, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# NearestNeighbor calculation
X_imputed_distance2 = np.array([
[0, 0],
[knn_imputed_value, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
assert_allclose(imputer.fit_transform(X), X_imputed_distance2)
# Test with weights = "distance" and n_neighbors=2
X = np.array([
[na, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
# neighbors are rows 1, 2, the nan_euclidean_distances are:
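    # (nan_euclidean rescales the squared distance over the observed
    # coordinates by n_features / n_observed, hence the 3/2 factor below)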
dist_0_1 = np.sqrt((3/2)*((1 - 0)**2 + (2 - 0)**2))
dist_0_2 = np.sqrt((3/2)*((2 - 0)**2 + (3 - 0)**2))
imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])
X_imputed = np.array([
[imputed_value, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
# Test with varying missingness patterns
X = np.array([
[1, 0, 0, 1],
[0, na, 1, na],
[1, 1, 1, na],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
# Get weights of donor neighbors
dist = nan_euclidean_distances(X, missing_values=na)
r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
r1c1_nbor_wt = 1 / r1c1_nbor_dists
r1c3_nbor_wt = 1 / r1c3_nbor_dists
r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
r2c3_nbor_wt = 1 / r2c3_nbor_dists
# Collect donor values
col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
# Final imputed values
r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
X_imputed = np.array([
[1, 0, 0, 1],
[0, r1c1_imp, 1, r1c3_imp],
[1, 1, 1, r2c3_imp],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
imputer = KNNImputer(weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
X = np.array([
[0, 0, 0, na],
[1, 1, 1, na],
[2, 2, na, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[na, 7, 7, 7]
])
dist = pairwise_distances(X, metric="nan_euclidean", squared=False,
missing_values=na)
# Calculate weights
r0c3_w = 1.0 / dist[0, 2:-1]
r1c3_w = 1.0 / dist[1, 2:-1]
r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
r7c0_w = 1.0 / dist[7, 2:7]
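    # (each slice above keeps the five nearest donors for that missing entry,
    # matching the default n_neighbors=5)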
# Calculate weighted averages
r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
X_imputed = np.array([
[0, 0, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
def test_knn_imputer_callable_metric():
# Define callable metric that returns the l1 norm:
def custom_callable(x, y, missing_values=np.nan, squared=False):
x = np.ma.array(x, mask=np.isnan(x))
y = np.ma.array(y, mask=np.isnan(y))
dist = np.nansum(np.abs(x-y))
return dist
X = np.array([
[4, 3, 3, np.nan],
[6, 9, 6, 9],
[4, 8, 6, 9],
[np.nan, 9, 11, 10.]
])
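    # With the l1 metric and n_neighbors=2, rows 1 and 2 are the closest
    # donors for X[0, 3] (both contribute 9) and for X[3, 0] (contributing
    # 6 and 4), hence the plain averages below.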
X_0_3 = (9 + 9) / 2
X_3_0 = (6 + 4) / 2
X_imputed = np.array([
[4, 3, 3, X_0_3],
[6, 9, 6, 9],
[4, 8, 6, 9],
[X_3_0, 9, 11, 10.]
])
imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
# Note that we use working_memory=0 to ensure that chunking is tested, even
# for a small dataset. However, it should raise a UserWarning that we ignore.
@pytest.mark.filterwarnings("ignore:adhere to working_memory")
def test_knn_imputer_with_simple_example(na, working_memory):
X = np.array([
[0, na, 0, na],
[1, 1, 1, na],
[2, 2, na, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[na, 7, 7, 7]
])
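    # With the default n_neighbors=5 and uniform weights, every missing entry
    # is the plain mean of its five nearest donors' values in that column.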
r0c1 = np.mean(X[1:6, 1])
r0c3 = np.mean(X[2:-1, -1])
r1c3 = np.mean(X[2:-1, -1])
r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
r7c0 = np.mean(X[2:-1, 0])
X_imputed = np.array([
[0, r0c1, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
with config_context(working_memory=working_memory):
imputer_comp = KNNImputer(missing_values=na)
assert_allclose(imputer_comp.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
@pytest.mark.parametrize("weights", ['uniform', 'distance'])
def test_knn_imputer_not_enough_valid_distances(na, weights):
    # Samples whose donors for the needed feature all have nan distances
X1 = np.array([
[na, 11],
[na, 1],
[3, na]
])
X1_imputed = np.array([
[3, 11],
[3, 1],
[3, 6]
])
knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
assert_allclose(knn.fit_transform(X1), X1_imputed)
X2 = np.array([[4, na]])
X2_imputed = np.array([[4, 6]])
assert_allclose(knn.transform(X2), X2_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_drops_all_nan_features(na):
X1 = np.array([
[na, 1],
[na, 2]
])
knn = KNNImputer(missing_values=na, n_neighbors=1)
X1_expected = np.array([[1], [2]])
assert_allclose(knn.fit_transform(X1), X1_expected)
X2 = np.array([
[1, 2],
[3, na]
])
X2_expected = np.array([[2], [1.5]])
assert_allclose(knn.transform(X2), X2_expected)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_distance_weighted_not_enough_neighbors(na,
working_memory):
X = np.array([
[3, na],
[2, na],
[na, 4],
[5, 6],
[6, 8],
[na, 5]
])
dist = pairwise_distances(X, metric="nan_euclidean", squared=False,
missing_values=na)
X_01 = np.average(X[3:5, 1], weights=1/dist[0, 3:5])
X_11 = np.average(X[3:5, 1], weights=1/dist[1, 3:5])
X_20 = np.average(X[3:5, 0], weights=1/dist[2, 3:5])
X_50 = np.average(X[3:5, 0], weights=1/dist[5, 3:5])
X_expected = np.array([
[3, X_01],
[2, X_11],
[X_20, 4],
[5, 6],
[6, 8],
[X_50, 5]
])
with config_context(working_memory=working_memory):
knn_3 = KNNImputer(missing_values=na, n_neighbors=3,
weights='distance')
assert_allclose(knn_3.fit_transform(X), X_expected)
knn_4 = KNNImputer(missing_values=na, n_neighbors=4,
weights='distance')
assert_allclose(knn_4.fit_transform(X), X_expected)
@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
def test_knn_tags(na, allow_nan):
knn = KNNImputer(missing_values=na)
assert knn._get_tags()["allow_nan"] == allow_nan
|
bsd-3-clause
|
bobeirasa/virtualenvs
|
pygeckozabbix/lib/python2.7/site-packages/setuptools/command/build_ext.py
|
286
|
11854
|
from distutils.command.build_ext import build_ext as _du_build_ext
try:
# Attempt to use Pyrex for building extensions, if available
from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
import os, sys
from distutils.file_util import copy_file
from setuptools.extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
try:
# Python 2.7 or >=3.2
from sysconfig import _CONFIG_VARS
except ImportError:
from distutils.sysconfig import get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
del get_config_var
from distutils.sysconfig import _config_vars as _CONFIG_VARS
from distutils import log
from distutils.errors import *
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
from dl import RTLD_NOW
have_rtld = True
use_stubs = True
except ImportError:
pass
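# if_dl() lets write_stub() below emit the dl/RTLD_NOW related lines of the
# generated stub loader only on platforms where the dl module is available.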
def if_dl(s):
if have_rtld:
return s
return ''
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,os.path.basename(filename))
src_filename = os.path.join(self.build_lib,filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
if _build_ext is not _du_build_ext and not hasattr(_build_ext,'pyrex_sources'):
# Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
def swig_sources(self, sources, *otherargs):
# first do any Pyrex processing
sources = _build_ext.swig_sources(self, sources) or sources
# Then do any actual SWIG stuff on the remainder
return _du_build_ext.swig_sources(self, sources, *otherargs)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self,fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext,Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = ext._links_to_dynamic = \
self.shlibs and self.links_to_dynamic(ext) or False
ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib,filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext,Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self,ext)
def build_extension(self, ext):
_compiler = self.compiler
try:
if isinstance(ext,Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self,ext)
if ext._needs_stub:
self.write_stub(
self.get_finalized_command('build_py').build_lib, ext
)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
def get_outputs(self):
outputs = _build_ext.get_outputs(self)
optimize = self.get_finalized_command('build_py').optimize
for ext in self.extensions:
if ext._needs_stub:
base = os.path.join(self.build_lib, *ext._full_name.split('.'))
outputs.append(base+'.py')
outputs.append(base+'.pyc')
if optimize:
outputs.append(base+'.pyo')
return outputs
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file+" already exists! Please delete.")
if not self.dry_run:
f = open(stub_file,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp"+if_dl(", dl"),
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name=='nt':
# Build shared libraries
#
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
): self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None
):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
#libraries=None, library_dirs=None, runtime_library_dirs=None,
#export_symbols=None, extra_preargs=None, extra_postargs=None,
#build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir,filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
|
mit
|
tinfoil/phantomjs
|
src/qt/qtbase/util/local_database/cldr2qlocalexml.py
|
102
|
42691
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os
import sys
import enumdata
import xpathlite
from xpathlite import DraftResolution
from dateconverter import convert_date
from xml.sax.saxutils import escape, unescape
import re
findAlias = xpathlite.findAlias
findEntry = xpathlite.findEntry
findEntryInFile = xpathlite._findEntryInFile
findTagsInFile = xpathlite.findTagsInFile
def parse_number_format(patterns, data):
# this is a very limited parsing of the number format for currency only.
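    # e.g. a CLDR currency pattern like u'\xa4#,##0.00' comes out as '%2%1'
    # (illustrative example; the real patterns are read from the CLDR data).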
def skip_repeating_pattern(x):
p = x.replace('0', '#').replace(',', '').replace('.', '')
seen = False
result = ''
for c in p:
if c == '#':
if seen:
continue
seen = True
else:
seen = False
result = result + c
return result
patterns = patterns.split(';')
result = []
for pattern in patterns:
pattern = skip_repeating_pattern(pattern)
pattern = pattern.replace('#', "%1")
# according to http://www.unicode.org/reports/tr35/#Number_Format_Patterns
        # there can be a doubled or tripled currency sign; however, none of the
# locales use that.
pattern = pattern.replace(u'\xa4', "%2")
pattern = pattern.replace("''", "###").replace("'", '').replace("###", "'")
pattern = pattern.replace('-', data['minus'])
pattern = pattern.replace('+', data['plus'])
result.append(pattern)
return result
def parse_list_pattern_part_format(pattern):
# this is a very limited parsing of the format for list pattern part only.
result = ""
result = pattern.replace("{0}", "%1")
result = result.replace("{1}", "%2")
result = result.replace("{2}", "%3")
return result
def ordStr(c):
    if len(c) == 1:
        return str(ord(c))
    raise xpathlite.Error("Unable to handle value \"%s\"" % addEscapes(c))
# the following functions are supposed to fix the problem with QLocale
# returning a character instead of strings for QLocale::exponential()
# and others. So we fall back to default values in these cases.
def fixOrdStrMinus(c):
if len(c) == 1:
return str(ord(c))
return str(ord('-'))
def fixOrdStrPlus(c):
if len(c) == 1:
return str(ord(c))
return str(ord('+'))
def fixOrdStrExp(c):
if len(c) == 1:
return str(ord(c))
return str(ord('e'))
def fixOrdStrPercent(c):
if len(c) == 1:
return str(ord(c))
return str(ord('%'))
def fixOrdStrList(c):
if len(c) == 1:
return str(ord(c))
return str(ord(';'))
def generateLocaleInfo(path):
(dir_name, file_name) = os.path.split(path)
if not path.endswith(".xml"):
return {}
# skip legacy/compatibility ones
alias = findAlias(path)
if alias:
raise xpathlite.Error("alias to \"%s\"" % alias)
language_code = findEntryInFile(path, "identity/language", attribute="type")[0]
if language_code == 'root':
# just skip it
return {}
country_code = findEntryInFile(path, "identity/territory", attribute="type")[0]
script_code = findEntryInFile(path, "identity/script", attribute="type")[0]
variant_code = findEntryInFile(path, "identity/variant", attribute="type")[0]
# we do not support variants
# ### actually there is only one locale with variant: en_US_POSIX
# does anybody care about it at all?
if variant_code:
raise xpathlite.Error("we do not support variants (\"%s\")" % variant_code)
language_id = enumdata.languageCodeToId(language_code)
if language_id <= 0:
raise xpathlite.Error("unknown language code \"%s\"" % language_code)
language = enumdata.language_list[language_id][0]
script_id = enumdata.scriptCodeToId(script_code)
if script_id == -1:
raise xpathlite.Error("unknown script code \"%s\"" % script_code)
script = enumdata.script_list[script_id][0]
# we should handle fully qualified names with the territory
if not country_code:
return {}
country_id = enumdata.countryCodeToId(country_code)
if country_id <= 0:
raise xpathlite.Error("unknown country code \"%s\"" % country_code)
country = enumdata.country_list[country_id][0]
    # We accept only values that have a "contributed" or "approved"
    # resolution; see http://www.unicode.org/cldr/process.html.
    # But we only respect the resolution for new data, for backward
    # compatibility.
draft = DraftResolution.contributed
result = {}
result['language'] = language
result['script'] = script
result['country'] = country
result['language_code'] = language_code
result['country_code'] = country_code
result['script_code'] = script_code
result['variant_code'] = variant_code
result['language_id'] = language_id
result['script_id'] = script_id
result['country_id'] = country_id
supplementalPath = dir_name + "/../supplemental/supplementalData.xml"
    currencies = findTagsInFile(supplementalPath, "currencyData/region[iso3166=%s]" % country_code)
result['currencyIsoCode'] = ''
result['currencyDigits'] = 2
result['currencyRounding'] = 1
if currencies:
for e in currencies:
if e[0] == 'currency':
tender = True
t = filter(lambda x: x[0] == 'tender', e[1])
if t and t[0][1] == 'false':
                    tender = False
if tender and not filter(lambda x: x[0] == 'to', e[1]):
result['currencyIsoCode'] = filter(lambda x: x[0] == 'iso4217', e[1])[0][1]
break
if result['currencyIsoCode']:
        t = findTagsInFile(supplementalPath, "currencyData/fractions/info[iso4217=%s]" % result['currencyIsoCode'])
if t and t[0][0] == 'info':
result['currencyDigits'] = int(filter(lambda x: x[0] == 'digits', t[0][1])[0][1])
result['currencyRounding'] = int(filter(lambda x: x[0] == 'rounding', t[0][1])[0][1])
numbering_system = None
try:
numbering_system = findEntry(path, "numbers/defaultNumberingSystem")
except:
pass
def findEntryDef(path, xpath, value=''):
try:
return findEntry(path, xpath)
except xpathlite.Error:
return value
def get_number_in_system(path, xpath, numbering_system):
if numbering_system:
try:
return findEntry(path, xpath + "[numberSystem=" + numbering_system + "]")
except xpathlite.Error:
                # in CLDR 1.9 the number system was refactored for numbers (but not for
                # currency), so if the previous findEntry doesn't work we should try this:
try:
return findEntry(path, xpath.replace("/symbols/", "/symbols[numberSystem=" + numbering_system + "]/"))
except xpathlite.Error:
# fallback to default
pass
return findEntry(path, xpath)
result['decimal'] = get_number_in_system(path, "numbers/symbols/decimal", numbering_system)
result['group'] = get_number_in_system(path, "numbers/symbols/group", numbering_system)
result['list'] = get_number_in_system(path, "numbers/symbols/list", numbering_system)
result['percent'] = get_number_in_system(path, "numbers/symbols/percentSign", numbering_system)
try:
numbering_systems = {}
for ns in findTagsInFile(cldr_dir + "/../supplemental/numberingSystems.xml", "numberingSystems"):
tmp = {}
id = ""
for data in ns[1:][0]: # ns looks like this: [u'numberingSystem', [(u'digits', u'0123456789'), (u'type', u'numeric'), (u'id', u'latn')]]
tmp[data[0]] = data[1]
if data[0] == u"id":
id = data[1]
numbering_systems[id] = tmp
result['zero'] = numbering_systems[numbering_system][u"digits"][0]
    except Exception as e:
sys.stderr.write("Native zero detection problem:\n" + str(e) + "\n")
result['zero'] = get_number_in_system(path, "numbers/symbols/nativeZeroDigit", numbering_system)
result['minus'] = get_number_in_system(path, "numbers/symbols/minusSign", numbering_system)
result['plus'] = get_number_in_system(path, "numbers/symbols/plusSign", numbering_system)
result['exp'] = get_number_in_system(path, "numbers/symbols/exponential", numbering_system).lower()
result['quotationStart'] = findEntry(path, "delimiters/quotationStart")
result['quotationEnd'] = findEntry(path, "delimiters/quotationEnd")
result['alternateQuotationStart'] = findEntry(path, "delimiters/alternateQuotationStart")
result['alternateQuotationEnd'] = findEntry(path, "delimiters/alternateQuotationEnd")
result['listPatternPartStart'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[start]"))
result['listPatternPartMiddle'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[middle]"))
result['listPatternPartEnd'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[end]"))
result['listPatternPartTwo'] = parse_list_pattern_part_format(findEntry(path, "listPatterns/listPattern/listPatternPart[2]"))
result['am'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[am]", draft)
result['pm'] = findEntry(path, "dates/calendars/calendar[gregorian]/dayPeriods/dayPeriodContext[format]/dayPeriodWidth[wide]/dayPeriod[pm]", draft)
result['longDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[full]/dateFormat/pattern"))
result['shortDateFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/dateFormats/dateFormatLength[short]/dateFormat/pattern"))
result['longTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[full]/timeFormat/pattern"))
result['shortTimeFormat'] = convert_date(findEntry(path, "dates/calendars/calendar[gregorian]/timeFormats/timeFormatLength[short]/timeFormat/pattern"))
endonym = None
if country_code and script_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s_%s]" % (language_code, script_code, country_code))
if not endonym and script_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, script_code))
if not endonym and country_code:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s_%s]" % (language_code, country_code))
if not endonym:
endonym = findEntryDef(path, "localeDisplayNames/languages/language[type=%s]" % (language_code))
result['language_endonym'] = endonym
result['country_endonym'] = findEntryDef(path, "localeDisplayNames/territories/territory[type=%s]" % (country_code))
currency_format = get_number_in_system(path, "numbers/currencyFormats/currencyFormatLength/currencyFormat/pattern", numbering_system)
currency_format = parse_number_format(currency_format, result)
result['currencyFormat'] = currency_format[0]
result['currencyNegativeFormat'] = ''
if len(currency_format) > 1:
result['currencyNegativeFormat'] = currency_format[1]
result['currencySymbol'] = ''
result['currencyDisplayName'] = ''
if result['currencyIsoCode']:
result['currencySymbol'] = findEntryDef(path, "numbers/currencies/currency[%s]/symbol" % result['currencyIsoCode'])
display_name_path = "numbers/currencies/currency[%s]/displayName" % result['currencyIsoCode']
result['currencyDisplayName'] \
= findEntryDef(path, display_name_path) + ";" \
+ findEntryDef(path, display_name_path + "[count=zero]") + ";" \
+ findEntryDef(path, display_name_path + "[count=one]") + ";" \
+ findEntryDef(path, display_name_path + "[count=two]") + ";" \
+ findEntryDef(path, display_name_path + "[count=few]") + ";" \
+ findEntryDef(path, display_name_path + "[count=many]") + ";" \
+ findEntryDef(path, display_name_path + "[count=other]") + ";"
standalone_long_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[wide]/month"
result['standaloneLongMonths'] \
= findEntry(path, standalone_long_month_path + "[1]") + ";" \
+ findEntry(path, standalone_long_month_path + "[2]") + ";" \
+ findEntry(path, standalone_long_month_path + "[3]") + ";" \
+ findEntry(path, standalone_long_month_path + "[4]") + ";" \
+ findEntry(path, standalone_long_month_path + "[5]") + ";" \
+ findEntry(path, standalone_long_month_path + "[6]") + ";" \
+ findEntry(path, standalone_long_month_path + "[7]") + ";" \
+ findEntry(path, standalone_long_month_path + "[8]") + ";" \
+ findEntry(path, standalone_long_month_path + "[9]") + ";" \
+ findEntry(path, standalone_long_month_path + "[10]") + ";" \
+ findEntry(path, standalone_long_month_path + "[11]") + ";" \
+ findEntry(path, standalone_long_month_path + "[12]") + ";"
standalone_short_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[abbreviated]/month"
result['standaloneShortMonths'] \
= findEntry(path, standalone_short_month_path + "[1]") + ";" \
+ findEntry(path, standalone_short_month_path + "[2]") + ";" \
+ findEntry(path, standalone_short_month_path + "[3]") + ";" \
+ findEntry(path, standalone_short_month_path + "[4]") + ";" \
+ findEntry(path, standalone_short_month_path + "[5]") + ";" \
+ findEntry(path, standalone_short_month_path + "[6]") + ";" \
+ findEntry(path, standalone_short_month_path + "[7]") + ";" \
+ findEntry(path, standalone_short_month_path + "[8]") + ";" \
+ findEntry(path, standalone_short_month_path + "[9]") + ";" \
+ findEntry(path, standalone_short_month_path + "[10]") + ";" \
+ findEntry(path, standalone_short_month_path + "[11]") + ";" \
+ findEntry(path, standalone_short_month_path + "[12]") + ";"
standalone_narrow_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[stand-alone]/monthWidth[narrow]/month"
result['standaloneNarrowMonths'] \
= findEntry(path, standalone_narrow_month_path + "[1]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[2]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[3]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[4]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[5]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[6]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[7]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[8]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[9]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[10]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[11]") + ";" \
+ findEntry(path, standalone_narrow_month_path + "[12]") + ";"
long_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[wide]/month"
result['longMonths'] \
= findEntry(path, long_month_path + "[1]") + ";" \
+ findEntry(path, long_month_path + "[2]") + ";" \
+ findEntry(path, long_month_path + "[3]") + ";" \
+ findEntry(path, long_month_path + "[4]") + ";" \
+ findEntry(path, long_month_path + "[5]") + ";" \
+ findEntry(path, long_month_path + "[6]") + ";" \
+ findEntry(path, long_month_path + "[7]") + ";" \
+ findEntry(path, long_month_path + "[8]") + ";" \
+ findEntry(path, long_month_path + "[9]") + ";" \
+ findEntry(path, long_month_path + "[10]") + ";" \
+ findEntry(path, long_month_path + "[11]") + ";" \
+ findEntry(path, long_month_path + "[12]") + ";"
short_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[abbreviated]/month"
result['shortMonths'] \
= findEntry(path, short_month_path + "[1]") + ";" \
+ findEntry(path, short_month_path + "[2]") + ";" \
+ findEntry(path, short_month_path + "[3]") + ";" \
+ findEntry(path, short_month_path + "[4]") + ";" \
+ findEntry(path, short_month_path + "[5]") + ";" \
+ findEntry(path, short_month_path + "[6]") + ";" \
+ findEntry(path, short_month_path + "[7]") + ";" \
+ findEntry(path, short_month_path + "[8]") + ";" \
+ findEntry(path, short_month_path + "[9]") + ";" \
+ findEntry(path, short_month_path + "[10]") + ";" \
+ findEntry(path, short_month_path + "[11]") + ";" \
+ findEntry(path, short_month_path + "[12]") + ";"
narrow_month_path = "dates/calendars/calendar[gregorian]/months/monthContext[format]/monthWidth[narrow]/month"
result['narrowMonths'] \
= findEntry(path, narrow_month_path + "[1]") + ";" \
+ findEntry(path, narrow_month_path + "[2]") + ";" \
+ findEntry(path, narrow_month_path + "[3]") + ";" \
+ findEntry(path, narrow_month_path + "[4]") + ";" \
+ findEntry(path, narrow_month_path + "[5]") + ";" \
+ findEntry(path, narrow_month_path + "[6]") + ";" \
+ findEntry(path, narrow_month_path + "[7]") + ";" \
+ findEntry(path, narrow_month_path + "[8]") + ";" \
+ findEntry(path, narrow_month_path + "[9]") + ";" \
+ findEntry(path, narrow_month_path + "[10]") + ";" \
+ findEntry(path, narrow_month_path + "[11]") + ";" \
+ findEntry(path, narrow_month_path + "[12]") + ";"
long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[wide]/day"
result['longDays'] \
= findEntry(path, long_day_path + "[sun]") + ";" \
+ findEntry(path, long_day_path + "[mon]") + ";" \
+ findEntry(path, long_day_path + "[tue]") + ";" \
+ findEntry(path, long_day_path + "[wed]") + ";" \
+ findEntry(path, long_day_path + "[thu]") + ";" \
+ findEntry(path, long_day_path + "[fri]") + ";" \
+ findEntry(path, long_day_path + "[sat]") + ";"
short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[abbreviated]/day"
result['shortDays'] \
= findEntry(path, short_day_path + "[sun]") + ";" \
+ findEntry(path, short_day_path + "[mon]") + ";" \
+ findEntry(path, short_day_path + "[tue]") + ";" \
+ findEntry(path, short_day_path + "[wed]") + ";" \
+ findEntry(path, short_day_path + "[thu]") + ";" \
+ findEntry(path, short_day_path + "[fri]") + ";" \
+ findEntry(path, short_day_path + "[sat]") + ";"
narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[format]/dayWidth[narrow]/day"
result['narrowDays'] \
= findEntry(path, narrow_day_path + "[sun]") + ";" \
+ findEntry(path, narrow_day_path + "[mon]") + ";" \
+ findEntry(path, narrow_day_path + "[tue]") + ";" \
+ findEntry(path, narrow_day_path + "[wed]") + ";" \
+ findEntry(path, narrow_day_path + "[thu]") + ";" \
+ findEntry(path, narrow_day_path + "[fri]") + ";" \
+ findEntry(path, narrow_day_path + "[sat]") + ";"
standalone_long_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[wide]/day"
result['standaloneLongDays'] \
= findEntry(path, standalone_long_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_long_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_long_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_long_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_long_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_long_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_long_day_path + "[sat]") + ";"
standalone_short_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[abbreviated]/day"
result['standaloneShortDays'] \
= findEntry(path, standalone_short_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_short_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_short_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_short_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_short_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_short_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_short_day_path + "[sat]") + ";"
standalone_narrow_day_path = "dates/calendars/calendar[gregorian]/days/dayContext[stand-alone]/dayWidth[narrow]/day"
result['standaloneNarrowDays'] \
= findEntry(path, standalone_narrow_day_path + "[sun]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[mon]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[tue]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[wed]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[thu]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[fri]") + ";" \
+ findEntry(path, standalone_narrow_day_path + "[sat]") + ";"
return result
def addEscapes(s):
result = ''
for c in s:
n = ord(c)
if n < 128:
result += c
else:
result += "\\x"
result += "%02x" % (n)
return result
def unicodeStr(s):
utf8 = s.encode('utf-8')
return "<size>" + str(len(utf8)) + "</size><data>" + addEscapes(utf8) + "</data>"
def usage():
print "Usage: cldr2qlocalexml.py <path-to-cldr-main>"
sys.exit()
def integrateWeekData(filePath):
if not filePath.endswith(".xml"):
return {}
monFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=mon]", attribute="territories")[0].split(" ")
tueFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=tue]", attribute="territories")[0].split(" ")
wedFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=wed]", attribute="territories")[0].split(" ")
thuFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=thu]", attribute="territories")[0].split(" ")
friFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=fri]", attribute="territories")[0].split(" ")
satFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=sat]", attribute="territories")[0].split(" ")
sunFirstDayIn = findEntryInFile(filePath, "weekData/firstDay[day=sun]", attribute="territories")[0].split(" ")
monWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=mon]", attribute="territories")[0].split(" ")
tueWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=tue]", attribute="territories")[0].split(" ")
wedWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=wed]", attribute="territories")[0].split(" ")
thuWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=thu]", attribute="territories")[0].split(" ")
friWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=fri]", attribute="territories")[0].split(" ")
satWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=sat]", attribute="territories")[0].split(" ")
sunWeekendStart = findEntryInFile(filePath, "weekData/weekendStart[day=sun]", attribute="territories")[0].split(" ")
monWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=mon]", attribute="territories")[0].split(" ")
tueWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=tue]", attribute="territories")[0].split(" ")
wedWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=wed]", attribute="territories")[0].split(" ")
thuWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=thu]", attribute="territories")[0].split(" ")
friWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=fri]", attribute="territories")[0].split(" ")
satWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=sat]", attribute="territories")[0].split(" ")
sunWeekendEnd = findEntryInFile(filePath, "weekData/weekendEnd[day=sun]", attribute="territories")[0].split(" ")
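    # Build per-country lookup tables; territory "001" (the CLDR "world"
    # entry) supplies the default used below for countries not listed
    # explicitly.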
firstDayByCountryCode = {}
for countryCode in monFirstDayIn:
firstDayByCountryCode[countryCode] = "mon"
for countryCode in tueFirstDayIn:
firstDayByCountryCode[countryCode] = "tue"
for countryCode in wedFirstDayIn:
firstDayByCountryCode[countryCode] = "wed"
for countryCode in thuFirstDayIn:
firstDayByCountryCode[countryCode] = "thu"
for countryCode in friFirstDayIn:
firstDayByCountryCode[countryCode] = "fri"
for countryCode in satFirstDayIn:
firstDayByCountryCode[countryCode] = "sat"
for countryCode in sunFirstDayIn:
firstDayByCountryCode[countryCode] = "sun"
weekendStartByCountryCode = {}
for countryCode in monWeekendStart:
weekendStartByCountryCode[countryCode] = "mon"
for countryCode in tueWeekendStart:
weekendStartByCountryCode[countryCode] = "tue"
for countryCode in wedWeekendStart:
weekendStartByCountryCode[countryCode] = "wed"
for countryCode in thuWeekendStart:
weekendStartByCountryCode[countryCode] = "thu"
for countryCode in friWeekendStart:
weekendStartByCountryCode[countryCode] = "fri"
for countryCode in satWeekendStart:
weekendStartByCountryCode[countryCode] = "sat"
for countryCode in sunWeekendStart:
weekendStartByCountryCode[countryCode] = "sun"
weekendEndByCountryCode = {}
for countryCode in monWeekendEnd:
weekendEndByCountryCode[countryCode] = "mon"
for countryCode in tueWeekendEnd:
weekendEndByCountryCode[countryCode] = "tue"
for countryCode in wedWeekendEnd:
weekendEndByCountryCode[countryCode] = "wed"
for countryCode in thuWeekendEnd:
weekendEndByCountryCode[countryCode] = "thu"
for countryCode in friWeekendEnd:
weekendEndByCountryCode[countryCode] = "fri"
for countryCode in satWeekendEnd:
weekendEndByCountryCode[countryCode] = "sat"
for countryCode in sunWeekendEnd:
weekendEndByCountryCode[countryCode] = "sun"
for (key,locale) in locale_database.iteritems():
countryCode = locale['country_code']
if countryCode in firstDayByCountryCode:
locale_database[key]['firstDayOfWeek'] = firstDayByCountryCode[countryCode]
else:
locale_database[key]['firstDayOfWeek'] = firstDayByCountryCode["001"]
if countryCode in weekendStartByCountryCode:
locale_database[key]['weekendStart'] = weekendStartByCountryCode[countryCode]
else:
locale_database[key]['weekendStart'] = weekendStartByCountryCode["001"]
if countryCode in weekendEndByCountryCode:
locale_database[key]['weekendEnd'] = weekendEndByCountryCode[countryCode]
else:
locale_database[key]['weekendEnd'] = weekendEndByCountryCode["001"]
if len(sys.argv) != 2:
usage()
cldr_dir = sys.argv[1]
if not os.path.isdir(cldr_dir):
usage()
cldr_files = os.listdir(cldr_dir)
locale_database = {}
for file in cldr_files:
try:
l = generateLocaleInfo(cldr_dir + "/" + file)
if not l:
sys.stderr.write("skipping file \"" + file + "\"\n")
continue
except xpathlite.Error as e:
sys.stderr.write("skipping file \"%s\" (%s)\n" % (file, str(e)))
continue
locale_database[(l['language_id'], l['script_id'], l['country_id'], l['variant_code'])] = l
integrateWeekData(cldr_dir+"/../supplemental/supplementalData.xml")
locale_keys = locale_database.keys()
locale_keys.sort()
cldr_version = 'unknown'
ldml = open(cldr_dir+"/../dtd/ldml.dtd", "r")
for line in ldml:
if 'version cldrVersion CDATA #FIXED' in line:
cldr_version = line.split('"')[1]
print "<localeDatabase>"
print " <version>" + cldr_version + "</version>"
print " <languageList>"
for id in enumdata.language_list:
l = enumdata.language_list[id]
print " <language>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </language>"
print " </languageList>"
print " <scriptList>"
for id in enumdata.script_list:
l = enumdata.script_list[id]
print " <script>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </script>"
print " </scriptList>"
print " <countryList>"
for id in enumdata.country_list:
l = enumdata.country_list[id]
print " <country>"
print " <name>" + l[0] + "</name>"
print " <id>" + str(id) + "</id>"
print " <code>" + l[1] + "</code>"
print " </country>"
print " </countryList>"
def _parseLocale(l):
language = "AnyLanguage"
script = "AnyScript"
country = "AnyCountry"
if l == "und":
raise xpathlite.Error("we are treating unknown locale like C")
items = l.split("_")
language_code = items[0]
if language_code != "und":
language_id = enumdata.languageCodeToId(language_code)
if language_id == -1:
raise xpathlite.Error("unknown language code \"%s\"" % language_code)
language = enumdata.language_list[language_id][0]
if len(items) > 1:
script_code = items[1]
country_code = ""
if len(items) > 2:
country_code = items[2]
if len(script_code) == 4:
script_id = enumdata.scriptCodeToId(script_code)
if script_id == -1:
raise xpathlite.Error("unknown script code \"%s\"" % script_code)
script = enumdata.script_list[script_id][0]
else:
country_code = script_code
if country_code:
country_id = enumdata.countryCodeToId(country_code)
if country_id == -1:
raise xpathlite.Error("unknown country code \"%s\"" % country_code)
country = enumdata.country_list[country_id][0]
return (language, script, country)
print " <likelySubtags>"
for ns in findTagsInFile(cldr_dir + "/../supplemental/likelySubtags.xml", "likelySubtags"):
tmp = {}
for data in ns[1:][0]: # ns looks like this: [u'likelySubtag', [(u'from', u'aa'), (u'to', u'aa_Latn_ET')]]
tmp[data[0]] = data[1]
try:
(from_language, from_script, from_country) = _parseLocale(tmp[u"from"])
except xpathlite.Error as e:
sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e)))
continue
try:
(to_language, to_script, to_country) = _parseLocale(tmp[u"to"])
except xpathlite.Error as e:
sys.stderr.write("skipping likelySubtag \"%s\" -> \"%s\" (%s)\n" % (tmp[u"from"], tmp[u"to"], str(e)))
continue
# substitute according to http://www.unicode.org/reports/tr35/#Likely_Subtags
if to_country == "AnyCountry" and from_country != to_country:
to_country = from_country
if to_script == "AnyScript" and from_script != to_script:
to_script = from_script
print " <likelySubtag>"
print " <from>"
print " <language>" + from_language + "</language>"
print " <script>" + from_script + "</script>"
print " <country>" + from_country + "</country>"
print " </from>"
print " <to>"
print " <language>" + to_language + "</language>"
print " <script>" + to_script + "</script>"
print " <country>" + to_country + "</country>"
print " </to>"
print " </likelySubtag>"
print " </likelySubtags>"
print " <localeList>"
print \
" <locale>\n\
<language>C</language>\n\
<languageEndonym></languageEndonym>\n\
<script>AnyScript</script>\n\
<country>AnyCountry</country>\n\
<countryEndonym></countryEndonym>\n\
<decimal>46</decimal>\n\
<group>44</group>\n\
<list>59</list>\n\
<percent>37</percent>\n\
<zero>48</zero>\n\
<minus>45</minus>\n\
<plus>43</plus>\n\
<exp>101</exp>\n\
<quotationStart>\"</quotationStart>\n\
<quotationEnd>\"</quotationEnd>\n\
<alternateQuotationStart>\'</alternateQuotationStart>\n\
<alternateQuotationEnd>\'</alternateQuotationEnd>\n\
<listPatternPartStart>%1, %2</listPatternPartStart>\n\
<listPatternPartMiddle>%1, %2</listPatternPartMiddle>\n\
<listPatternPartEnd>%1, %2</listPatternPartEnd>\n\
<listPatternPartTwo>%1, %2</listPatternPartTwo>\n\
<am>AM</am>\n\
<pm>PM</pm>\n\
<firstDayOfWeek>mon</firstDayOfWeek>\n\
<weekendStart>sat</weekendStart>\n\
<weekendEnd>sun</weekendEnd>\n\
<longDateFormat>EEEE, d MMMM yyyy</longDateFormat>\n\
<shortDateFormat>d MMM yyyy</shortDateFormat>\n\
<longTimeFormat>HH:mm:ss z</longTimeFormat>\n\
<shortTimeFormat>HH:mm:ss</shortTimeFormat>\n\
<standaloneLongMonths>January;February;March;April;May;June;July;August;September;October;November;December;</standaloneLongMonths>\n\
<standaloneShortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</standaloneShortMonths>\n\
<standaloneNarrowMonths>J;F;M;A;M;J;J;A;S;O;N;D;</standaloneNarrowMonths>\n\
<longMonths>January;February;March;April;May;June;July;August;September;October;November;December;</longMonths>\n\
<shortMonths>Jan;Feb;Mar;Apr;May;Jun;Jul;Aug;Sep;Oct;Nov;Dec;</shortMonths>\n\
<narrowMonths>1;2;3;4;5;6;7;8;9;10;11;12;</narrowMonths>\n\
<longDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</longDays>\n\
<shortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</shortDays>\n\
<narrowDays>7;1;2;3;4;5;6;</narrowDays>\n\
<standaloneLongDays>Sunday;Monday;Tuesday;Wednesday;Thursday;Friday;Saturday;</standaloneLongDays>\n\
<standaloneShortDays>Sun;Mon;Tue;Wed;Thu;Fri;Sat;</standaloneShortDays>\n\
<standaloneNarrowDays>S;M;T;W;T;F;S;</standaloneNarrowDays>\n\
<currencyIsoCode></currencyIsoCode>\n\
<currencySymbol></currencySymbol>\n\
<currencyDisplayName>;;;;;;;</currencyDisplayName>\n\
<currencyDigits>2</currencyDigits>\n\
<currencyRounding>1</currencyRounding>\n\
<currencyFormat>%1%2</currencyFormat>\n\
<currencyNegativeFormat></currencyNegativeFormat>\n\
</locale>"
for key in locale_keys:
l = locale_database[key]
print " <locale>"
print " <language>" + l['language'] + "</language>"
print " <languageEndonym>" + escape(l['language_endonym']).encode('utf-8') + "</languageEndonym>"
print " <script>" + l['script'] + "</script>"
print " <country>" + l['country'] + "</country>"
print " <countryEndonym>" + escape(l['country_endonym']).encode('utf-8') + "</countryEndonym>"
print " <languagecode>" + l['language_code'] + "</languagecode>"
print " <scriptcode>" + l['script_code'] + "</scriptcode>"
print " <countrycode>" + l['country_code'] + "</countrycode>"
print " <decimal>" + ordStr(l['decimal']) + "</decimal>"
print " <group>" + ordStr(l['group']) + "</group>"
print " <list>" + fixOrdStrList(l['list']) + "</list>"
print " <percent>" + fixOrdStrPercent(l['percent']) + "</percent>"
print " <zero>" + ordStr(l['zero']) + "</zero>"
print " <minus>" + fixOrdStrMinus(l['minus']) + "</minus>"
print " <plus>" + fixOrdStrPlus(l['plus']) + "</plus>"
print " <exp>" + fixOrdStrExp(l['exp']) + "</exp>"
print " <quotationStart>" + l['quotationStart'].encode('utf-8') + "</quotationStart>"
print " <quotationEnd>" + l['quotationEnd'].encode('utf-8') + "</quotationEnd>"
print " <alternateQuotationStart>" + l['alternateQuotationStart'].encode('utf-8') + "</alternateQuotationStart>"
print " <alternateQuotationEnd>" + l['alternateQuotationEnd'].encode('utf-8') + "</alternateQuotationEnd>"
print " <listPatternPartStart>" + l['listPatternPartStart'].encode('utf-8') + "</listPatternPartStart>"
print " <listPatternPartMiddle>" + l['listPatternPartMiddle'].encode('utf-8') + "</listPatternPartMiddle>"
print " <listPatternPartEnd>" + l['listPatternPartEnd'].encode('utf-8') + "</listPatternPartEnd>"
print " <listPatternPartTwo>" + l['listPatternPartTwo'].encode('utf-8') + "</listPatternPartTwo>"
print " <am>" + l['am'].encode('utf-8') + "</am>"
print " <pm>" + l['pm'].encode('utf-8') + "</pm>"
print " <firstDayOfWeek>" + l['firstDayOfWeek'].encode('utf-8') + "</firstDayOfWeek>"
print " <weekendStart>" + l['weekendStart'].encode('utf-8') + "</weekendStart>"
print " <weekendEnd>" + l['weekendEnd'].encode('utf-8') + "</weekendEnd>"
print " <longDateFormat>" + l['longDateFormat'].encode('utf-8') + "</longDateFormat>"
print " <shortDateFormat>" + l['shortDateFormat'].encode('utf-8') + "</shortDateFormat>"
print " <longTimeFormat>" + l['longTimeFormat'].encode('utf-8') + "</longTimeFormat>"
print " <shortTimeFormat>" + l['shortTimeFormat'].encode('utf-8') + "</shortTimeFormat>"
print " <standaloneLongMonths>" + l['standaloneLongMonths'].encode('utf-8') + "</standaloneLongMonths>"
print " <standaloneShortMonths>"+ l['standaloneShortMonths'].encode('utf-8') + "</standaloneShortMonths>"
print " <standaloneNarrowMonths>"+ l['standaloneNarrowMonths'].encode('utf-8') + "</standaloneNarrowMonths>"
print " <longMonths>" + l['longMonths'].encode('utf-8') + "</longMonths>"
print " <shortMonths>" + l['shortMonths'].encode('utf-8') + "</shortMonths>"
print " <narrowMonths>" + l['narrowMonths'].encode('utf-8') + "</narrowMonths>"
print " <longDays>" + l['longDays'].encode('utf-8') + "</longDays>"
print " <shortDays>" + l['shortDays'].encode('utf-8') + "</shortDays>"
print " <narrowDays>" + l['narrowDays'].encode('utf-8') + "</narrowDays>"
print " <standaloneLongDays>" + l['standaloneLongDays'].encode('utf-8') + "</standaloneLongDays>"
print " <standaloneShortDays>" + l['standaloneShortDays'].encode('utf-8') + "</standaloneShortDays>"
print " <standaloneNarrowDays>" + l['standaloneNarrowDays'].encode('utf-8') + "</standaloneNarrowDays>"
print " <currencyIsoCode>" + l['currencyIsoCode'].encode('utf-8') + "</currencyIsoCode>"
print " <currencySymbol>" + l['currencySymbol'].encode('utf-8') + "</currencySymbol>"
print " <currencyDisplayName>" + l['currencyDisplayName'].encode('utf-8') + "</currencyDisplayName>"
print " <currencyDigits>" + str(l['currencyDigits']) + "</currencyDigits>"
print " <currencyRounding>" + str(l['currencyRounding']) + "</currencyRounding>"
print " <currencyFormat>" + l['currencyFormat'].encode('utf-8') + "</currencyFormat>"
print " <currencyNegativeFormat>" + l['currencyNegativeFormat'].encode('utf-8') + "</currencyNegativeFormat>"
print " </locale>"
print " </localeList>"
print "</localeDatabase>"
|
bsd-3-clause
|
habeanf/Open-Knesset
|
agendas/listeners.py
|
14
|
4868
|
#encoding: utf-8
import datetime
from django.db.models.signals import post_save, pre_delete, post_delete
from django.contrib.contenttypes.models import ContentType
from planet.models import Feed, Post
from actstream import action
from actstream.models import Follow
from knesset.utils import cannonize, disable_for_loaddata
from agendas.models import AgendaVote, AgendaMeeting, AgendaBill, Agenda
from links.models import Link, LinkType
@disable_for_loaddata
def record_agenda_ascription_action(sender, created, instance, **kwargs):
if created:
action.send(instance.agenda, verb='agenda ascribed',
description='agenda "%s" ascribed to vote "%s"' %
(instance.agenda.__unicode__(),instance.vote.title),
target = instance,
timestamp = datetime.datetime.now())
else:
action.send(instance.agenda, verb='agenda-vote relation updated',
description='relation between agenda "%s" and vote "%s" was updated' %
(instance.agenda.__unicode__(),instance.vote.title),
target = instance,
timestamp = datetime.datetime.now())
post_save.connect(record_agenda_ascription_action, sender=AgendaVote)
@disable_for_loaddata
def record_agenda_removal_action(sender, instance, **kwargs):
action.send(instance.agenda, verb='agenda removed',
description="agenda %s removed from vote %s" %
(instance.agenda.name,instance.vote.title),
target = instance.vote,
timestamp = datetime.datetime.now())
pre_delete.connect(record_agenda_removal_action, sender=AgendaVote)
@disable_for_loaddata
def record_agenda_bill_ascription_action(sender, created, instance, **kwargs):
if created:
action.send(instance.agenda, verb='agenda_bill_ascribed',
description='agenda "%s" ascribed to bill "%s"' %
(instance.agenda.__unicode__(),instance.bill.full_title),
target = instance,
timestamp = datetime.datetime.now())
else:
action.send(instance.agenda, verb='agenda_bill_relation_updated',
description='relation between agenda "%s" and bill "%s" was updated' %
(instance.agenda.__unicode__(),instance.bill.full_title),
target = instance,
timestamp = datetime.datetime.now())
post_save.connect(record_agenda_bill_ascription_action, sender=AgendaBill)
@disable_for_loaddata
def record_agenda_bill_removal_action(sender, instance, **kwargs):
action.send(instance.agenda, verb='agenda removed',
description="agenda %s removed from bill %s" %
(instance.agenda.name,instance.bill.full_title),
target = instance.bill,
timestamp = datetime.datetime.now())
pre_delete.connect(record_agenda_bill_removal_action, sender=AgendaBill)
@disable_for_loaddata
def record_agenda_meeting_ascription_action(sender, created, instance, **kwargs):
if created:
action.send(instance.agenda, verb='agenda_meeting_ascribed',
description='agenda "%s" ascribed to meeting "%s"' %
(instance.agenda.__unicode__(),instance.meeting.title()),
target = instance,
timestamp = datetime.datetime.now())
else:
action.send(instance.agenda, verb='agenda_meeting_relation_updated',
description='relation between agenda "%s" and meeting "%s" was updated' %
(instance.agenda.__unicode__(), instance.meeting.title()),
target = instance,
timestamp = datetime.datetime.now())
post_save.connect(record_agenda_meeting_ascription_action, sender=AgendaMeeting)
@disable_for_loaddata
def record_agenda_meeting_removal_action(sender, instance, **kwargs):
action.send(instance.agenda, verb='agenda_meeting_removed',
description='agenda "%s" removed from meeting "%s"' %
(instance.agenda.__unicode__(),instance.meeting.title()),
target = instance.meeting,
timestamp = datetime.datetime.now())
pre_delete.connect(record_agenda_meeting_removal_action, sender=AgendaMeeting)
@disable_for_loaddata
def update_num_followers(sender, instance, **kwargs):
agenda = instance.actor
if isinstance(agenda, Agenda):
agenda.num_followers = Follow.objects.filter(
content_type = ContentType.objects.get(
app_label="agendas",
model="agenda").id,
object_id=agenda.id).count()
agenda.save()
post_delete.connect(update_num_followers, sender=Follow)
post_save.connect(update_num_followers, sender=Follow)
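# --- Hedged illustration (not part of Open-Knesset) --------------------------------
# The receivers above all follow one pattern: a signal fires and a denormalized field
# (e.g. Agenda.num_followers) is recomputed from the source of truth. The sketch below
# shows that pattern with a bare django.dispatch.Signal and a plain dict, so it needs
# no database; the names `followed` and `stats` are made up for the example.
if __name__ == '__main__':
    from django.dispatch import Signal
    followed = Signal()
    stats = {'num_followers': 0}
    def recount_followers(sender, followers, **kwargs):
        # Recompute the cached count from the authoritative collection.
        stats['num_followers'] = len(followers)
    followed.connect(recount_followers)
    followed.send(sender=None, followers=['alice', 'bob'])
    assert stats['num_followers'] == 2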
|
bsd-3-clause
|
Chipe1/aima-python
|
tests/test_probabilistic_learning.py
|
2
|
1122
|
import random
import pytest
from learning import DataSet
from probabilistic_learning import *
random.seed("aima-python")
def test_naive_bayes():
iris = DataSet(name='iris')
# discrete
nbd = NaiveBayesLearner(iris, continuous=False)
assert nbd([5, 3, 1, 0.1]) == 'setosa'
assert nbd([6, 3, 4, 1.1]) == 'versicolor'
assert nbd([7.7, 3, 6, 2]) == 'virginica'
# continuous
nbc = NaiveBayesLearner(iris, continuous=True)
assert nbc([5, 3, 1, 0.1]) == 'setosa'
assert nbc([6, 5, 3, 1.5]) == 'versicolor'
assert nbc([7, 3, 6.5, 2]) == 'virginica'
# simple
data1 = 'a' * 50 + 'b' * 30 + 'c' * 15
dist1 = CountingProbDist(data1)
data2 = 'a' * 30 + 'b' * 45 + 'c' * 20
dist2 = CountingProbDist(data2)
data3 = 'a' * 20 + 'b' * 20 + 'c' * 35
dist3 = CountingProbDist(data3)
dist = {('First', 0.5): dist1, ('Second', 0.3): dist2, ('Third', 0.2): dist3}
nbs = NaiveBayesLearner(dist, simple=True)
assert nbs('aab') == 'First'
assert nbs(['b', 'b']) == 'Second'
assert nbs('ccbcc') == 'Third'
if __name__ == "__main__":
pytest.main()
|
mit
|
yamahata/neutron
|
neutron/services/loadbalancer/agent_scheduler.py
|
13
|
5191
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from neutron.common import constants
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PoolLoadbalancerAgentBinding(model_base.BASEV2):
"""Represents binding between neutron loadbalancer pools and agents."""
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id", ondelete='CASCADE'),
primary_key=True)
agent = orm.relation(agents_db.Agent)
agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id",
ondelete='CASCADE'))
class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin,
lbaas_agentscheduler
.LbaasAgentSchedulerPluginBase):
def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None):
query = context.session.query(PoolLoadbalancerAgentBinding)
query = query.options(joinedload('agent'))
binding = query.get(pool_id)
if (binding and self.is_eligible_agent(
active, binding.agent)):
return {'agent': self._make_agent_dict(binding.agent)}
def get_lbaas_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER)
if active is not None:
query = query.filter_by(admin_state_up=active)
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
return [agent
for agent in query
if self.is_eligible_agent(active, agent)]
def list_pools_on_lbaas_agent(self, context, id):
query = context.session.query(PoolLoadbalancerAgentBinding.pool_id)
query = query.filter_by(agent_id=id)
pool_ids = [item[0] for item in query]
if pool_ids:
return {'pools': self.get_pools(context, filters={'id': pool_ids})}
else:
return {'pools': []}
def get_lbaas_agent_candidates(self, device_driver, active_agents):
candidates = []
for agent in active_agents:
agent_conf = self.get_configuration_dict(agent)
if device_driver in agent_conf['device_drivers']:
candidates.append(agent)
return candidates
class ChanceScheduler(object):
"""Allocate a loadbalancer agent for a vip in a random way."""
def schedule(self, plugin, context, pool, device_driver):
"""Schedule the pool to an active loadbalancer agent if there
is no enabled agent hosting it.
"""
with context.session.begin(subtransactions=True):
lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
context, pool['id'])
if lbaas_agent:
LOG.debug(_('Pool %(pool_id)s has already been hosted'
' by lbaas agent %(agent_id)s'),
{'pool_id': pool['id'],
'agent_id': lbaas_agent['id']})
return
active_agents = plugin.get_lbaas_agents(context, active=True)
if not active_agents:
LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])
return
candidates = plugin.get_lbaas_agent_candidates(device_driver,
active_agents)
if not candidates:
LOG.warn(_('No lbaas agent supporting device driver %s'),
device_driver)
return
chosen_agent = random.choice(candidates)
binding = PoolLoadbalancerAgentBinding()
binding.agent = chosen_agent
binding.pool_id = pool['id']
context.session.add(binding)
LOG.debug(_('Pool %(pool_id)s is scheduled to '
'lbaas agent %(agent_id)s'),
{'pool_id': pool['id'],
'agent_id': chosen_agent['id']})
return chosen_agent
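# --- Hedged illustration (not part of Neutron) --------------------------------------
# The essence of ChanceScheduler.schedule without the DB session: keep only the agents
# whose configuration advertises the requested device driver, then pick one at random.
# The sample agents and driver names below are invented for the example.
if __name__ == '__main__':
    sample_agents = [
        {'id': 'agent-1', 'device_drivers': ['haproxy_ns']},
        {'id': 'agent-2', 'device_drivers': ['haproxy_ns', 'other_driver']},
        {'id': 'agent-3', 'device_drivers': ['other_driver']},
    ]
    def pick(agents, device_driver):
        candidates = [a for a in agents if device_driver in a['device_drivers']]
        return random.choice(candidates) if candidates else None
    chosen = pick(sample_agents, 'haproxy_ns')
    assert chosen['id'] in ('agent-1', 'agent-2')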
|
apache-2.0
|
mxOBS/deb-pkg_trusty_chromium-browser
|
ppapi/native_client/tools/browser_tester/browsertester/browserprocess.py
|
126
|
2032
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import signal
import subprocess
import time
class BrowserProcessBase(object):
def __init__(self, handle):
self.handle = handle
print 'PID', self.handle.pid
def GetReturnCode(self):
return self.handle.returncode
def IsRunning(self):
return self.handle.poll() is None
def Wait(self, wait_steps, sleep_time):
try:
self.term()
except Exception:
# Terminating the handle can raise an exception. There is likely no point
# in waiting if the termination didn't succeed.
return
i = 0
# subprocess.wait() doesn't have a timeout, unfortunately.
while self.IsRunning() and i < wait_steps:
time.sleep(sleep_time)
i += 1
def Kill(self):
if self.IsRunning():
print 'KILLING the browser'
try:
self.kill()
# If it doesn't die, we hang. Oh well.
self.handle.wait()
except Exception:
# If it is already dead, then it's ok.
# This may happen if the browser dies after the first poll, but
# before the kill.
if self.IsRunning():
raise
class BrowserProcess(BrowserProcessBase):
def term(self):
self.handle.terminate()
def kill(self):
self.handle.kill()
class BrowserProcessPosix(BrowserProcessBase):
""" This variant of BrowserProcess uses process groups to manage browser
  lifetime. """
def term(self):
os.killpg(self.handle.pid, signal.SIGTERM)
def kill(self):
os.killpg(self.handle.pid, signal.SIGKILL)
def RunCommandWithSubprocess(cmd, env=None):
handle = subprocess.Popen(cmd, env=env)
return BrowserProcess(handle)
def RunCommandInProcessGroup(cmd, env=None):
def SetPGrp():
os.setpgrp()
print 'I\'M THE SESSION LEADER!'
handle = subprocess.Popen(cmd, env=env, preexec_fn=SetPGrp)
return BrowserProcessPosix(handle)
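# --- Hedged usage sketch (not part of the original file) ----------------------------
# Launching a command in its own process group and tearing it down. POSIX only,
# because BrowserProcessPosix relies on os.setpgrp/os.killpg; 'sleep' is just a
# stand-in for a browser binary.
if __name__ == '__main__':
  proc = RunCommandInProcessGroup(['sleep', '30'])
  proc.Wait(wait_steps=5, sleep_time=0.1)  # sends SIGTERM to the group, then polls
  proc.Kill()                              # escalates to SIGKILL if still running
  print('return code: %s' % proc.GetReturnCode())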
|
bsd-3-clause
|
houssine78/vertical-travel-porting-v8-wip
|
__unported__/travel_passport/travel_passenger.py
|
2
|
1291
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class travel_passenger(orm.Model):
_inherit = 'travel.passenger'
_columns = {
'passport_id': fields.many2one('res.passport', 'Passport',
help="Passport to use on Travel."),
}
|
agpl-3.0
|
mckinsel/FastxIO
|
tests/test_fastq.py
|
1
|
2141
|
import os
import unittest
from . import TEST_DATA_DIR
from FastxIO import fastx
class TestFastq(unittest.TestCase):
def setUp(self):
self.fastq_file = os.path.join(TEST_DATA_DIR, "test.fastq")
self.gz_fastq_file = os.path.join(TEST_DATA_DIR, "test.fastq.gz")
self.windows_fastq_file = os.path.join(TEST_DATA_DIR, "test_windows_endings.fastq")
self.gz_windows_fastq_file = os.path.join(TEST_DATA_DIR, "test_windows_endings.fastq.gz")
def _verify_fastq(self, reader):
rec_list = list(reader)
self.assertEqual(len(rec_list), 250)
self.assertEqual(rec_list[0].name, "IRIS:7:1:17:394#0/1")
self.assertEqual(rec_list[0].sequence, "GTCAGGACAAGAAAGACAANTCCAATTNACATTATG")
self.assertEqual(rec_list[0].quality, "aaabaa`]baaaaa_aab]D^^`b`aYDW]abaa`^")
self.assertEqual(rec_list[-1].name, "IRIS:7:1:39:1454#0/1")
self.assertEqual(rec_list[-1].sequence, "TCATTGCTAAAGACTTGTGTCTTCCCGACCAGAGGG")
self.assertEqual(rec_list[-1].quality, "abbaaababaaaaaaaaaa`aaa___^__]]^[^^Y")
def test_read_fastq(self):
reader = fastx.FastqReader(self.fastq_file)
self._verify_fastq(reader)
def test_read_fastx(self):
reader = fastx.FastxReader(self.fastq_file)
self._verify_fastq(reader)
def test_read_gz_fastq(self):
reader = fastx.FastqReader(self.gz_fastq_file)
self._verify_fastq(reader)
def test_read_gz_fastx(self):
reader = fastx.FastxReader(self.gz_fastq_file)
self._verify_fastq(reader)
def test_read_windows_fastq(self):
reader = fastx.FastqReader(self.windows_fastq_file)
self._verify_fastq(reader)
def test_read_windows_fastx(self):
reader = fastx.FastxReader(self.windows_fastq_file)
self._verify_fastq(reader)
def test_read_gz_windows_fastq(self):
reader = fastx.FastqReader(self.gz_windows_fastq_file)
self._verify_fastq(reader)
def test_read_gz_windows_fastx(self):
reader = fastx.FastxReader(self.gz_windows_fastq_file)
self._verify_fastq(reader)
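# --- Hedged usage sketch (based only on what the tests above exercise) --------------
# FastqReader takes a path to a plain or gzipped FASTQ file and yields records with
# .name, .sequence and .quality attributes, e.g.:
#
#     for record in fastx.FastqReader('reads.fastq'):   # placeholder path
#         print('%s\t%d bases' % (record.name, len(record.sequence)))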
|
mit
|
galaxy001/libtorrent
|
BitTorrent-4.4.0/khashmir/util.py
|
11
|
2217
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
from struct import pack, unpack
def bucket_stats(l):
"""given a list of khashmir instances, finds min, max, and average number of nodes in tables"""
max = avg = 0
min = None
def count(buckets):
c = 0
for bucket in buckets:
c = c + len(bucket.l)
return c
for node in l:
c = count(node.table.buckets)
if min == None:
min = c
elif c < min:
min = c
if c > max:
max = c
avg = avg + c
avg = avg / len(l)
return {'min':min, 'max':max, 'avg':avg}
def compact_peer_info(ip, port):
return pack('!BBBBH', *([int(i) for i in ip.split('.')] + [port]))
def packPeers(peers):
return map(lambda a: compact_peer_info(a[0], a[1]), peers)
def reducePeers(peers):
return reduce(lambda a, b: a + b, peers, '')
def unpackPeers(p):
peers = []
if type(p) == type(''):
for x in xrange(0, len(p), 6):
ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
port = unpack('!H', p[x+4:x+6])[0]
peers.append((ip, port, None))
else:
for x in p:
peers.append((x['ip'], x['port'], x.get('peer id')))
return peers
def compact_node_info(id, ip, port):
return id + compact_peer_info(ip, port)
def packNodes(nodes):
return ''.join([compact_node_info(x['id'], x['host'], x['port']) for x in nodes])
def unpackNodes(n):
nodes = []
for x in xrange(0, len(n), 26):
id = n[x:x+20]
ip = '.'.join([str(ord(i)) for i in n[x+20:x+24]])
port = unpack('!H', n[x+24:x+26])[0]
nodes.append({'id':id, 'host':ip, 'port': port})
return nodes
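# --- Hedged round-trip example (not part of the original module) --------------------
# The compact peer format packs an IPv4 address into four raw bytes and the port into
# two big-endian bytes, so a peer takes exactly six bytes on the wire. As with the
# rest of this Python 2 module, the packed value is a plain str.
if __name__ == '__main__':
    blob = compact_peer_info('10.0.0.1', 6881)
    assert len(blob) == 6
    assert unpackPeers(blob) == [('10.0.0.1', 6881, None)]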
|
mit
|
ita1024/samba
|
third_party/waf/wafadmin/3rdparty/batched_cc.py
|
32
|
4653
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Batched builds - compile faster
instead of compiling object files one by one, c/c++ compilers are often able to compile at once:
cc -c ../file1.c ../file2.c ../file3.c
Files are output in the directory where the compiler is called, and dependencies are more difficult
to track (the command should not be re-run on all source files when only one file changes).
To cope with this, we proceed as if the files were compiled one by one, but no command is actually run:
each cc/cpp Task is replaced by a TaskSlave.
A new task called TaskMaster collects the signatures from each slave and finds out the command-line
to run.
To set this up, the method ccroot::create_task is replaced by a new version; to enable batched builds,
it is only necessary to import this module in the configuration (no other change is required).
"""
MAX_BATCH = 50
MAXPARALLEL = False
EXT_C = ['.c', '.cc', '.cpp', '.cxx']
import os, threading
import TaskGen, Task, ccroot, Build, Logs
from TaskGen import extension, feature, before
from Constants import *
cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} -c ${SRCLST}'
cc_fun = Task.compile_fun_noshell('batched_cc', cc_str)[0]
cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} -c ${SRCLST}'
cxx_fun = Task.compile_fun_noshell('batched_cxx', cxx_str)[0]
count = 70000
class batch_task(Task.Task):
color = 'RED'
after = 'cc cxx'
before = 'cc_link cxx_link static_link'
def __str__(self):
return '(batch compilation for %d slaves)\n' % len(self.slaves)
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.slaves = []
self.inputs = []
self.hasrun = 0
global count
count += 1
self.idx = count
def add_slave(self, slave):
self.slaves.append(slave)
self.set_run_after(slave)
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
for t in self.slaves:
#if t.executed:
if t.hasrun != SKIPPED:
return RUN_ME
return SKIP_ME
def run(self):
outputs = []
self.outputs = []
srclst = []
slaves = []
for t in self.slaves:
if t.hasrun != SKIPPED:
slaves.append(t)
srclst.append(t.inputs[0].abspath(self.env))
self.env.SRCLST = srclst
self.cwd = slaves[0].inputs[0].parent.abspath(self.env)
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
env._CCINCFLAGS = env.CXXINCFLAGS = []
# local flags come first
# set the user-defined includes paths
for i in env['INC_PATHS']:
app('_CCINCFLAGS', cpppath_st % i.abspath())
app('_CXXINCFLAGS', cpppath_st % i.abspath())
app('_CCINCFLAGS', cpppath_st % i.abspath(env))
app('_CXXINCFLAGS', cpppath_st % i.abspath(env))
# set the library include paths
for i in env['CPPPATH']:
app('_CCINCFLAGS', cpppath_st % i)
app('_CXXINCFLAGS', cpppath_st % i)
if self.slaves[0].__class__.__name__ == 'cc':
ret = cc_fun(self)
else:
ret = cxx_fun(self)
if ret:
return ret
for t in slaves:
t.old_post_run()
from TaskGen import extension, feature, after
import cc, cxx
def wrap(fun):
def foo(self, node):
# we cannot control the extension, this sucks
self.obj_ext = '.o'
task = fun(self, node)
if not getattr(self, 'masters', None):
self.masters = {}
self.allmasters = []
if not node.parent.id in self.masters:
m = self.masters[node.parent.id] = self.master = self.create_task('batch')
self.allmasters.append(m)
else:
m = self.masters[node.parent.id]
if len(m.slaves) > MAX_BATCH:
m = self.masters[node.parent.id] = self.master = self.create_task('batch')
self.allmasters.append(m)
m.add_slave(task)
return task
return foo
c_hook = wrap(cc.c_hook)
extension(cc.EXT_CC)(c_hook)
cxx_hook = wrap(cxx.cxx_hook)
extension(cxx.EXT_CXX)(cxx_hook)
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_link')
def link_after_masters(self):
if getattr(self, 'allmasters', None):
for m in self.allmasters:
self.link_task.set_run_after(m)
for c in ['cc', 'cxx']:
t = Task.TaskBase.classes[c]
def run(self):
pass
def post_run(self):
#self.executed=1
pass
def can_retrieve_cache(self):
if self.old_can_retrieve_cache():
for m in self.generator.allmasters:
try:
m.slaves.remove(self)
except ValueError:
pass #this task wasn't included in that master
return 1
else:
return None
setattr(t, 'oldrun', t.__dict__['run'])
setattr(t, 'run', run)
setattr(t, 'old_post_run', t.post_run)
setattr(t, 'post_run', post_run)
setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache)
setattr(t, 'can_retrieve_cache', can_retrieve_cache)
|
gpl-3.0
|
TalShafir/ansible
|
lib/ansible/modules/cloud/google/gcp_container_node_pool.py
|
8
|
32420
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_node_pool
description:
- NodePool contains the name and configuration for a cluster's node pool.
- Node pools are a set of nodes (i.e. VM's), with a common configuration and specification,
under the control of the cluster master. They may have a set of Kubernetes labels
applied to them, which may be used to reference them during pod scheduling. They
may also be resized up or down, to accommodate the workload.
short_description: Creates a GCP NodePool
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the node pool.
required: false
config:
description:
- The node configuration of the pool.
required: false
suboptions:
machine_type:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
required: false
disk_size_gb:
description:
- Size of the disk attached to each node, specified in GB. The smallest allowed disk
size is 10GB. If unspecified, the default disk size is 100GB.
required: false
oauth_scopes:
description:
- The set of Google API scopes to be made available on all of the node VMs under the
"default" service account.
- 'The following scopes are recommended, but not required, and by default are not
included: U(https://www.googleapis.com/auth/compute) is required for mounting persistent
storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for communicating
with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are
enabled, in which case their required scopes will be added.
required: false
service_account:
description:
- The Google Cloud Platform Service Account to be used by the node VMs. If no Service
Account is specified, the "default" service account is used.
required: false
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes in length.
These are reflected as part of a URL in the metadata server. Additionally, to avoid
ambiguity, keys must not conflict with any other metadata keys for the project or
be one of the four reserved keys: "instance-template", "kube-env", "startup-script",
and "user-data" Values are free-form strings, and only have meaning as interpreted
by the image running in the instance. The only restriction placed on them is that
each value''s size must be less than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
image_type:
description:
- The image type to use for this node. Note that for a given image type, the latest
version of it will be used.
required: false
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
      These will be added in addition to any default label(s) that Kubernetes may apply to
the node. In case of conflict in label keys, the applied set may differ depending
on the Kubernetes version -- it''s best to assume the behavior is undefined and
conflicts should be avoided. For more information, including usage and the valid
values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) An object
containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
local_ssd_count:
description:
- The number of local SSD disks to be attached to the node.
    - 'The limit for this value is dependent upon the maximum number of disks available
on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) for
more information.'
required: false
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify valid
sources or targets for network firewalls and are specified by the client during
cluster or node pool creation. Each tag within the list must comply with RFC1035.
required: false
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
      for more information about preemptible VM instances.'
required: false
type: bool
initial_node_count:
description:
- The initial node count for the pool. You must ensure that your Compute Engine resource
quota is sufficient for this number of instances. You must also have available firewall
and routes quota.
required: true
autoscaling:
description:
- Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid
configuration is present.
required: false
suboptions:
enabled:
description:
- Is autoscaling enabled for this node pool.
required: false
type: bool
min_node_count:
description:
- Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount.
required: false
max_node_count:
description:
- Maximum number of nodes in the NodePool. Must be >= minNodeCount.
    - There has to be enough quota to scale up the cluster.
required: false
management:
description:
- Management configuration for this NodePool.
required: false
suboptions:
auto_upgrade:
description:
- A flag that specifies whether node auto-upgrade is enabled for the node pool. If
enabled, node auto-upgrade helps keep the nodes in your node pool up to date with
the latest release version of Kubernetes.
required: false
type: bool
auto_repair:
description:
- A flag that specifies whether the node auto-repair is enabled for the node pool.
If enabled, the nodes in this node pool will be monitored and, if they fail health
checks too many times, an automatic repair action will be triggered.
required: false
type: bool
upgrade_options:
description:
- Specifies the Auto Upgrade knobs for the node pool.
required: false
suboptions:
auto_upgrade_start_time:
description:
- This field is set when upgrades are about to commence with the approximate start
time for the upgrades, in RFC3339 text format.
required: false
description:
description:
- This field is set when upgrades are about to commence with the description of the
upgrade.
required: false
cluster:
description:
- The cluster this node pool belongs to.
- 'This field represents a link to a Cluster resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_container_cluster
task and then set this cluster field to "{{ name-of-resource }}" Alternatively,
you can set this cluster to a dictionary with the name key where the value is the
name of your Cluster.'
required: true
zone:
description:
- The zone where the node pool is deployed.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a cluster
gcp_container_cluster:
name: "cluster-nodepool"
initial_node_count: 4
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: cluster
- name: create a node pool
gcp_container_node_pool:
name: "test_object"
initial_node_count: 4
cluster: "{{ cluster }}"
zone: us-central1-a
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The name of the node pool.
returned: success
type: str
config:
description:
- The node configuration of the pool.
returned: success
type: complex
contains:
machineType:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
returned: success
type: str
diskSizeGb:
description:
- Size of the disk attached to each node, specified in GB. The smallest allowed disk
size is 10GB. If unspecified, the default disk size is 100GB.
returned: success
type: int
oauthScopes:
description:
- The set of Google API scopes to be made available on all of the node VMs under the
"default" service account.
- 'The following scopes are recommended, but not required, and by default are not
included: U(https://www.googleapis.com/auth/compute) is required for mounting persistent
storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for communicating
with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are
enabled, in which case their required scopes will be added.
returned: success
type: list
serviceAccount:
description:
- The Google Cloud Platform Service Account to be used by the node VMs. If no Service
Account is specified, the "default" service account is used.
returned: success
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes in length.
These are reflected as part of a URL in the metadata server. Additionally, to avoid
ambiguity, keys must not conflict with any other metadata keys for the project or
be one of the four reserved keys: "instance-template", "kube-env", "startup-script",
and "user-data" Values are free-form strings, and only have meaning as interpreted
by the image running in the instance. The only restriction placed on them is that
each value''s size must be less than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
imageType:
description:
- The image type to use for this node. Note that for a given image type, the latest
version of it will be used.
returned: success
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
      These will be added in addition to any default label(s) that Kubernetes may apply to
the node. In case of conflict in label keys, the applied set may differ depending
on the Kubernetes version -- it''s best to assume the behavior is undefined and
conflicts should be avoided. For more information, including usage and the valid
values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html) An object
containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
localSsdCount:
description:
- The number of local SSD disks to be attached to the node.
    - 'The limit for this value is dependent upon the maximum number of disks available
on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits) for
more information.'
returned: success
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify valid
sources or targets for network firewalls and are specified by the client during
cluster or node pool creation. Each tag within the list must comply with RFC1035.
returned: success
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
      for more information about preemptible VM instances.'
returned: success
type: bool
initialNodeCount:
description:
- The initial node count for the pool. You must ensure that your Compute Engine resource
quota is sufficient for this number of instances. You must also have available firewall
and routes quota.
returned: success
type: int
version:
description:
- The version of the Kubernetes of this node.
returned: success
type: str
autoscaling:
description:
- Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid
configuration is present.
returned: success
type: complex
contains:
enabled:
description:
- Is autoscaling enabled for this node pool.
returned: success
type: bool
minNodeCount:
description:
- Minimum number of nodes in the NodePool. Must be >= 1 and <= maxNodeCount.
returned: success
type: int
maxNodeCount:
description:
- Maximum number of nodes in the NodePool. Must be >= minNodeCount.
    - There has to be enough quota to scale up the cluster.
returned: success
type: int
management:
description:
- Management configuration for this NodePool.
returned: success
type: complex
contains:
autoUpgrade:
description:
- A flag that specifies whether node auto-upgrade is enabled for the node pool. If
enabled, node auto-upgrade helps keep the nodes in your node pool up to date with
the latest release version of Kubernetes.
returned: success
type: bool
autoRepair:
description:
- A flag that specifies whether the node auto-repair is enabled for the node pool.
If enabled, the nodes in this node pool will be monitored and, if they fail health
checks too many times, an automatic repair action will be triggered.
returned: success
type: bool
upgradeOptions:
description:
- Specifies the Auto Upgrade knobs for the node pool.
returned: success
type: complex
contains:
autoUpgradeStartTime:
description:
- This field is set when upgrades are about to commence with the approximate start
time for the upgrades, in RFC3339 text format.
returned: success
type: str
description:
description:
- This field is set when upgrades are about to commence with the description of the
upgrade.
returned: success
type: str
cluster:
description:
- The cluster this node pool belongs to.
returned: success
type: dict
zone:
description:
- The zone where the node pool is deployed.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(type='str'),
config=dict(type='dict', options=dict(
machine_type=dict(type='str'),
disk_size_gb=dict(type='int'),
oauth_scopes=dict(type='list', elements='str'),
service_account=dict(type='str'),
metadata=dict(type='dict'),
image_type=dict(type='str'),
labels=dict(type='dict'),
local_ssd_count=dict(type='int'),
tags=dict(type='list', elements='str'),
preemptible=dict(type='bool')
)),
initial_node_count=dict(required=True, type='int'),
autoscaling=dict(type='dict', options=dict(
enabled=dict(type='bool'),
min_node_count=dict(type='int'),
max_node_count=dict(type='int')
)),
management=dict(type='dict', options=dict(
auto_upgrade=dict(type='bool'),
auto_repair=dict(type='bool'),
upgrade_options=dict(type='dict', options=dict(
auto_upgrade_start_time=dict(type='str'),
description=dict(type='str')
))
)),
cluster=dict(required=True, type='dict'),
zone=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link):
auth = GcpSession(module, 'container')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': module.params.get('name'),
u'config': NodePoolConfig(module.params.get('config', {}), module).to_request(),
u'initialNodeCount': module.params.get('initial_node_count'),
u'autoscaling': NodePoolAutoscaling(module.params.get('autoscaling', {}), module).to_request(),
u'management': NodePoolManagement(module.params.get('management', {}), module).to_request()
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'container')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
res = {
'project': module.params['project'],
'zone': module.params['zone'],
'cluster': replace_resource_dict(module.params['cluster'], 'name'),
'name': module.params['name']
}
return "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters/{cluster}/nodePools/{name}".format(**res)
def collection(module):
res = {
'project': module.params['project'],
'zone': module.params['zone'],
'cluster': replace_resource_dict(module.params['cluster'], 'name')
}
return "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters/{cluster}/nodePools".format(**res)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': response.get(u'name'),
u'config': NodePoolConfig(response.get(u'config', {}), module).from_response(),
u'initialNodeCount': module.params.get('initial_node_count'),
u'version': response.get(u'version'),
u'autoscaling': NodePoolAutoscaling(response.get(u'autoscaling', {}), module).from_response(),
u'management': NodePoolManagement(response.get(u'management', {}), module).from_response()
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response)
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']))
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
if status not in ['PENDING', 'RUNNING', 'DONE', 'ABORTING']:
module.fail_json(msg="Invalid result %s" % status)
op_result = fetch_resource(module, op_uri)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
# Google Container Engine API has its own layout for the create method,
# defined like this:
#
# {
# 'nodePool': {
# ... node pool data
# }
# }
#
# Format the request to match the expected input by the API
def encode_request(resource_request, module):
return {
'nodePool': resource_request
}
class NodePoolConfig(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'machineType': self.request.get('machine_type'),
u'diskSizeGb': self.request.get('disk_size_gb'),
u'oauthScopes': self.request.get('oauth_scopes'),
u'serviceAccount': self.request.get('service_account'),
u'metadata': self.request.get('metadata'),
u'imageType': self.request.get('image_type'),
u'labels': self.request.get('labels'),
u'localSsdCount': self.request.get('local_ssd_count'),
u'tags': self.request.get('tags'),
u'preemptible': self.request.get('preemptible')
})
def from_response(self):
return remove_nones_from_dict({
u'machineType': self.request.get(u'machineType'),
u'diskSizeGb': self.request.get(u'diskSizeGb'),
u'oauthScopes': self.request.get(u'oauthScopes'),
u'serviceAccount': self.request.get(u'serviceAccount'),
u'metadata': self.request.get(u'metadata'),
u'imageType': self.request.get(u'imageType'),
u'labels': self.request.get(u'labels'),
u'localSsdCount': self.request.get(u'localSsdCount'),
u'tags': self.request.get(u'tags'),
u'preemptible': self.request.get(u'preemptible')
})
class NodePoolAutoscaling(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'enabled': self.request.get('enabled'),
u'minNodeCount': self.request.get('min_node_count'),
u'maxNodeCount': self.request.get('max_node_count')
})
def from_response(self):
return remove_nones_from_dict({
u'enabled': self.request.get(u'enabled'),
u'minNodeCount': self.request.get(u'minNodeCount'),
u'maxNodeCount': self.request.get(u'maxNodeCount')
})
class NodePoolManagement(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'autoUpgrade': self.request.get('auto_upgrade'),
u'autoRepair': self.request.get('auto_repair'),
u'upgradeOptions': NodePoolUpgradeOptions(self.request.get('upgrade_options', {}), self.module).to_request()
})
def from_response(self):
return remove_nones_from_dict({
u'autoUpgrade': self.request.get(u'autoUpgrade'),
u'autoRepair': self.request.get(u'autoRepair'),
u'upgradeOptions': NodePoolUpgradeOptions(self.request.get(u'upgradeOptions', {}), self.module).from_response()
})
class NodePoolUpgradeOptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'autoUpgradeStartTime': self.request.get('auto_upgrade_start_time'),
u'description': self.request.get('description')
})
def from_response(self):
return remove_nones_from_dict({
u'autoUpgradeStartTime': self.request.get(u'autoUpgradeStartTime'),
u'description': self.request.get(u'description')
})
if __name__ == '__main__':
main()
|
gpl-3.0
|
remidechazelles/mycroft-core
|
mycroft/tts/fa_tts.py
|
7
|
2194
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import requests
from mycroft.tts import TTSValidator
from mycroft.tts.remote_tts import RemoteTTS
__author__ = 'jdorleans'
class FATTS(RemoteTTS):
PARAMS = {
'voice[name]': 'cmu-slt-hsmm',
'input[type]': 'TEXT',
'input[locale]': 'en_US',
'input[content]': 'Hello World',
'output[format]': 'WAVE_FILE',
'output[type]': 'AUDIO'
}
def __init__(self, lang, voice, url):
super(FATTS, self).__init__(lang, voice, url, '/say',
FATTSValidator(self))
def build_request_params(self, sentence):
params = self.PARAMS.copy()
params['voice[name]'] = self.voice
params['input[locale]'] = self.lang
params['input[content]'] = sentence.encode('utf-8')
return params
class FATTSValidator(TTSValidator):
def __init__(self, tts):
super(FATTSValidator, self).__init__(tts)
def validate_lang(self):
# TODO
pass
def validate_connection(self):
try:
resp = requests.get(self.tts.url + "/info/version", verify=False)
content = resp.json()
if content.get('product', '').find('FA-TTS') < 0:
raise Exception('Invalid FA-TTS server.')
except:
raise Exception(
'FA-TTS server could not be verified. Check your connection '
'to the server: ' + self.tts.url)
def get_tts_class(self):
return FATTS
|
gpl-3.0
|
smallyear/linuxLearn
|
salt/salt/modules/pecl.py
|
1
|
3969
|
# -*- coding: utf-8 -*-
'''
Manage PHP pecl extensions.
'''
from __future__ import absolute_import
# Import python libs
import re
import logging
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _cmd_quote
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__)
def __virtual__():
return True if salt.utils.which('pecl') else False
def _pecl(command, defaults=False):
'''
Execute the command passed with pecl
'''
cmdline = 'pecl {0}'.format(command)
if salt.utils.is_true(defaults):
cmdline = 'yes ' "''" + ' | ' + cmdline
ret = __salt__['cmd.run_all'](cmdline, python_shell=True)
if ret['retcode'] == 0:
return ret['stdout']
else:
log.error('Problem running pecl. Is php-pear installed?')
return ''
def install(pecls, defaults=False, force=False, preferred_state='stable'):
'''
.. versionadded:: 0.17.0
Installs one or several pecl extensions.
pecls
The pecl extensions to install.
defaults
Use default answers for extensions such as pecl_http which ask
questions before installation. Without this option, the pecl.installed
state will hang indefinitely when trying to install these extensions.
force
Whether to force the installed version or not
CLI Example:
.. code-block:: bash
salt '*' pecl.install fuse
'''
if isinstance(pecls, six.string_types):
pecls = [pecls]
preferred_state = '-d preferred_state={0}'.format(_cmd_quote(preferred_state))
if force:
return _pecl('{0} install -f {1}'.format(preferred_state, _cmd_quote(' '.join(pecls))),
defaults=defaults)
else:
_pecl('{0} install {1}'.format(preferred_state, _cmd_quote(' '.join(pecls))),
defaults=defaults)
if not isinstance(pecls, list):
pecls = [pecls]
for pecl in pecls:
found = False
if '/' in pecl:
channel, pecl = pecl.split('/')
else:
channel = None
installed_pecls = list_(channel)
for pecl in installed_pecls:
installed_pecl_with_version = '{0}-{1}'.format(
pecl,
installed_pecls.get(pecl)[0]
)
if pecl in installed_pecl_with_version:
found = True
if not found:
return False
return True
def uninstall(pecls):
'''
Uninstall one or several pecl extensions.
pecls
The pecl extensions to uninstall.
CLI Example:
.. code-block:: bash
salt '*' pecl.uninstall fuse
'''
if isinstance(pecls, six.string_types):
pecls = [pecls]
return _pecl('uninstall {0}'.format(_cmd_quote(' '.join(pecls))))
def update(pecls):
'''
Update one or several pecl extensions.
pecls
The pecl extensions to update.
CLI Example:
.. code-block:: bash
salt '*' pecl.update fuse
'''
if isinstance(pecls, six.string_types):
pecls = [pecls]
return _pecl('install -U {0}'.format(_cmd_quote(' '.join(pecls))))
def list_(channel=None):
'''
List installed pecl extensions.
CLI Example:
.. code-block:: bash
salt '*' pecl.list
'''
pecl_channel_pat = re.compile('^([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)')
pecls = {}
command = 'list'
if channel:
command = '{0} -c {1}'.format(command, _cmd_quote(channel))
lines = _pecl(command).splitlines()
lines = (l for l in lines if pecl_channel_pat.match(l))
for line in lines:
match = pecl_channel_pat.match(line)
if match:
pecls[match.group(1)] = [match.group(2), match.group(3)]
return pecls
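# --- Hedged illustration (the sample output line is an assumption) ------------------
# How list_() turns one line of `pecl list` output into a dict entry: three
# whitespace-separated columns become  name -> [version, state].
if __name__ == '__main__':
    sample_line = 'imagick     3.4.3   stable'
    match = re.compile('^([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)').match(sample_line)
    print({match.group(1): [match.group(2), match.group(3)]})  # {'imagick': ['3.4.3', 'stable']}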
|
apache-2.0
|
fzimmermann89/pyload
|
module/lib/beaker/crypto/pbkdf2.py
|
43
|
11924
|
#!/usr/bin/python
# -*- coding: ascii -*-
###########################################################################
# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
#
# Copyright (C) 2007 Dwayne C. Litzenberger <[email protected]>
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Country of origin: Canada
#
###########################################################################
# Sample PBKDF2 usage:
# from Crypto.Cipher import AES
# from PBKDF2 import PBKDF2
# import os
#
# salt = os.urandom(8) # 64-bit salt
# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
# iv = os.urandom(16) # 128-bit IV
# cipher = AES.new(key, AES.MODE_CBC, iv)
# ...
#
# Sample crypt() usage:
# from PBKDF2 import crypt
# pwhash = crypt("secret")
# alleged_pw = raw_input("Enter password: ")
# if pwhash == crypt(alleged_pw, pwhash):
# print "Password good"
# else:
# print "Invalid password"
#
###########################################################################
# History:
#
# 2007-07-27 Dwayne C. Litzenberger <[email protected]>
# - Initial Release (v1.0)
#
# 2007-07-31 Dwayne C. Litzenberger <[email protected]>
# - Bugfix release (v1.1)
# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
# function in the previous release) silently truncates all keys to 64
# bytes. The way it was used in the previous release, this would only be a
# problem if the pseudorandom function returned values larger than
# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
# anything that silently reduces the security margin from what is
# expected.
#
###########################################################################
__version__ = "1.1"
from struct import pack
from binascii import b2a_hex
from random import randint
from base64 import b64encode
from beaker.crypto.util import hmac as HMAC, hmac_sha1 as SHA1
def strxor(a, b):
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
class PBKDF2(object):
"""PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
This implementation takes a passphrase and a salt (and optionally an
iteration count, a digest module, and a MAC module) and provides a
file-like object from which an arbitrarily-sized key can be read.
If the passphrase and/or salt are unicode objects, they are encoded as
UTF-8 before they are processed.
The idea behind PBKDF2 is to derive a cryptographic key from a
passphrase and a salt.
PBKDF2 may also be used as a strong salted password hash. The
'crypt' function is provided for that purpose.
Remember: Keys generated using PBKDF2 are only as strong as the
passphrases they are derived from.
"""
def __init__(self, passphrase, salt, iterations=1000,
digestmodule=SHA1, macmodule=HMAC):
if not callable(macmodule):
macmodule = macmodule.new
self.__macmodule = macmodule
self.__digestmodule = digestmodule
self._setup(passphrase, salt, iterations, self._pseudorandom)
def _pseudorandom(self, key, msg):
"""Pseudorandom function. e.g. HMAC-SHA1"""
return self.__macmodule(key=key, msg=msg,
digestmod=self.__digestmodule).digest()
def read(self, bytes):
"""Read the specified number of key bytes."""
if self.closed:
raise ValueError("file-like object is closed")
size = len(self.__buf)
blocks = [self.__buf]
i = self.__blockNum
while size < bytes:
i += 1
if i > 0xffffffff:
# We could return "" here, but
raise OverflowError("derived key too long")
block = self.__f(i)
blocks.append(block)
size += len(block)
buf = "".join(blocks)
retval = buf[:bytes]
self.__buf = buf[bytes:]
self.__blockNum = i
return retval
def __f(self, i):
# i must fit within 32 bits
assert (1 <= i <= 0xffffffff)
U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
result = U
for j in xrange(2, 1+self.__iterations):
U = self.__prf(self.__passphrase, U)
result = strxor(result, U)
return result
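    # For reference (RFC 2898 notation; this comment is not in the original source):
    # the loop above computes
    #     U_1 = PRF(P, S || INT(i)),   U_j = PRF(P, U_{j-1})   for j = 2..c,
    #     F(P, S, c, i) = U_1 xor U_2 xor ... xor U_c
    # with c == self.__iterations; read() then concatenates the blocks F(..., i).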
def hexread(self, octets):
"""Read the specified number of octets. Return them as hexadecimal.
Note that len(obj.hexread(n)) == 2*n.
"""
return b2a_hex(self.read(octets))
def _setup(self, passphrase, salt, iterations, prf):
# Sanity checks:
# passphrase and salt must be str or unicode (in the latter
# case, we convert to UTF-8)
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("UTF-8")
if not isinstance(passphrase, str):
raise TypeError("passphrase must be str or unicode")
if isinstance(salt, unicode):
salt = salt.encode("UTF-8")
if not isinstance(salt, str):
raise TypeError("salt must be str or unicode")
# iterations must be an integer >= 1
if not isinstance(iterations, (int, long)):
raise TypeError("iterations must be an integer")
if iterations < 1:
raise ValueError("iterations must be at least 1")
# prf must be callable
if not callable(prf):
raise TypeError("prf must be callable")
self.__passphrase = passphrase
self.__salt = salt
self.__iterations = iterations
self.__prf = prf
self.__blockNum = 0
self.__buf = ""
self.closed = False
def close(self):
"""Close the stream."""
if not self.closed:
del self.__passphrase
del self.__salt
del self.__iterations
del self.__prf
del self.__blockNum
del self.__buf
self.closed = True
def crypt(word, salt=None, iterations=None):
"""PBKDF2-based unix crypt(3) replacement.
The number of iterations specified in the salt overrides the 'iterations'
parameter.
The effective hash length is 192 bits.
"""
# Generate a (pseudo-)random salt if the user hasn't provided one.
if salt is None:
salt = _makesalt()
# salt must be a string or the us-ascii subset of unicode
if isinstance(salt, unicode):
salt = salt.encode("us-ascii")
if not isinstance(salt, str):
raise TypeError("salt must be a string")
# word must be a string or unicode (in the latter case, we convert to UTF-8)
if isinstance(word, unicode):
word = word.encode("UTF-8")
if not isinstance(word, str):
raise TypeError("word must be a string or unicode")
# Try to extract the real salt and iteration count from the salt
if salt.startswith("$p5k2$"):
(iterations, salt, dummy) = salt.split("$")[2:5]
if iterations == "":
iterations = 400
else:
converted = int(iterations, 16)
if iterations != "%x" % converted: # lowercase hex, minimum digits
raise ValueError("Invalid salt")
iterations = converted
if not (iterations >= 1):
raise ValueError("Invalid salt")
# Make sure the salt matches the allowed character set
allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
for ch in salt:
if ch not in allowed:
raise ValueError("Illegal character %r in salt" % (ch,))
if iterations is None or iterations == 400:
iterations = 400
salt = "$p5k2$$" + salt
else:
salt = "$p5k2$%x$%s" % (iterations, salt)
rawhash = PBKDF2(word, salt, iterations).read(24)
return salt + "$" + b64encode(rawhash, "./")
# Add crypt as a static method of the PBKDF2 class
# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
# crypt.
PBKDF2.crypt = staticmethod(crypt)
def _makesalt():
"""Return a 48-bit pseudorandom salt for crypt().
This function is not suitable for generating cryptographic secrets.
"""
binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
return b64encode(binarysalt, "./")
def test_pbkdf2():
"""Module self-test"""
from binascii import a2b_hex
#
# Test vectors from RFC 3962
#
# Test 1
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16)
expected = a2b_hex("cdedb5281bb2f801565a1122b2563515")
if result != expected:
raise RuntimeError("self-test failed")
# Test 2
result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32)
expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b"
"a7e52ddbc5e5142f708a31e2e62b1e13")
if result != expected:
raise RuntimeError("self-test failed")
# Test 3
result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32)
expected = ("139c30c0966bc32ba55fdbf212530ac9"
"c5ec59f1a452f5cc9ad940fea0598ed1")
if result != expected:
raise RuntimeError("self-test failed")
# Test 4
result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32)
expected = ("9ccad6d468770cd51b10e6a68721be61"
"1a8b4d282601db3b36be9246915ec82a")
if result != expected:
raise RuntimeError("self-test failed")
#
# Other test vectors
#
# Chunked read
f = PBKDF2("kickstart", "workbench", 256)
result = f.read(17)
result += f.read(17)
result += f.read(1)
result += f.read(2)
result += f.read(3)
expected = PBKDF2("kickstart", "workbench", 256).read(40)
if result != expected:
raise RuntimeError("self-test failed")
#
# crypt() test vectors
#
# crypt 1
result = crypt("cloadm", "exec")
expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql'
if result != expected:
raise RuntimeError("self-test failed")
# crypt 2
result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....')
expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g'
if result != expected:
raise RuntimeError("self-test failed")
# crypt 3
result = crypt("dcl", "tUsch7fU", iterations=13)
expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL"
if result != expected:
raise RuntimeError("self-test failed")
# crypt 4 (unicode)
result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
'$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')
expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'
if result != expected:
raise RuntimeError("self-test failed")
if __name__ == '__main__':
test_pbkdf2()
# vim:set ts=4 sw=4 sts=4 expandtab:
|
gpl-3.0
|
angelapper/odoo
|
addons/survey/survey.py
|
8
|
58353
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DF
from openerp.addons.website.models.website import slug
from urlparse import urljoin
from itertools import product
from collections import Counter
from collections import OrderedDict
from openerp.exceptions import UserError
import datetime
import logging
import re
import uuid
_logger = logging.getLogger(__name__)
class survey_stage(osv.Model):
"""Stages for Kanban view of surveys"""
_name = 'survey.stage'
_description = 'Survey Stage'
_order = 'sequence,id'
_columns = {
'name': fields.char(string="Name", required=True, translate=True),
'sequence': fields.integer(string="Sequence"),
'closed': fields.boolean(string="Closed", help="If closed, people won't be able to answer surveys in this column."),
'fold': fields.boolean(string="Folded in kanban view")
}
_defaults = {
'sequence': 1,
'closed': False
}
_sql_constraints = [
('positive_sequence', 'CHECK(sequence >= 0)', 'Sequence number MUST be a non-negative integer')
]
class survey_survey(osv.Model):
'''Settings for a multi-page/multi-question survey.
Each survey can have one or more attached pages, and each page can display
one or more questions.
'''
_name = 'survey.survey'
_description = 'Survey'
_rec_name = 'title'
_inherit = ['mail.thread', 'ir.needaction_mixin']
# Protected methods #
def _has_questions(self, cr, uid, ids, context=None):
""" Ensure that this survey has at least one page with at least one
question. """
for survey in self.browse(cr, uid, ids, context=context):
if not survey.page_ids or not [page.question_ids
for page in survey.page_ids if page.question_ids]:
return False
return True
## Function fields ##
def _is_designed(self, cr, uid, ids, name, arg, context=None):
res = dict()
for survey in self.browse(cr, uid, ids, context=context):
if not survey.page_ids or not [page.question_ids
for page in survey.page_ids if page.question_ids]:
res[survey.id] = False
else:
res[survey.id] = True
return res
def _get_tot_sent_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of invitations sent for this survey, be they
(partially) completed or not """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
[('survey_id', '=', id), ('type', '=', 'link')],
context=context, count=True)
return res
def _get_tot_start_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of started instances of this survey, be they
completed or not """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
['&', ('survey_id', '=', id), '|', ('state', '=', 'skip'), ('state', '=', 'done')],
context=context, count=True)
return res
def _get_tot_comp_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of completed instances of this survey """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
[('survey_id', '=', id), ('state', '=', 'done')],
context=context, count=True)
return res
def _get_public_url(self, cr, uid, ids, name, arg, context=None):
""" Computes a public URL for the survey """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/start/%s" % slug(survey))
return res
def _get_public_url_html(self, cr, uid, ids, name, arg, context=None):
""" Computes a public URL for the survey (html-embeddable version)"""
urls = self._get_public_url(cr, uid, ids, name, arg, context=context)
for id, url in urls.iteritems():
urls[id] = '<a href="%s">%s</a>' % (url, _("Click here to start survey"))
return urls
def _get_print_url(self, cr, uid, ids, name, arg, context=None):
""" Computes a printing URL for the survey """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/print/%s" % slug(survey))
return res
def _get_result_url(self, cr, uid, ids, name, arg, context=None):
""" Computes an URL for the survey results """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/results/%s" % slug(survey))
return res
# Model fields #
_columns = {
'title': fields.char('Title', required=1, translate=True),
'page_ids': fields.one2many('survey.page', 'survey_id', 'Pages', copy=True),
'stage_id': fields.many2one('survey.stage', string="Stage", ondelete="set null", copy=False),
'auth_required': fields.boolean('Login required',
help="Users with a public link will be requested to login before taking part to the survey",
oldname="authenticate"),
'users_can_go_back': fields.boolean('Users can go back',
help="If checked, users can go back to previous pages."),
'tot_sent_survey': fields.function(_get_tot_sent_survey,
string="Number of sent surveys", type="integer"),
'tot_start_survey': fields.function(_get_tot_start_survey,
string="Number of started surveys", type="integer"),
'tot_comp_survey': fields.function(_get_tot_comp_survey,
string="Number of completed surveys", type="integer"),
'description': fields.html('Description', translate=True,
oldname="description", help="A long description of the purpose of the survey"),
'color': fields.integer('Color Index'),
'user_input_ids': fields.one2many('survey.user_input', 'survey_id',
'User responses', readonly=1),
'designed': fields.function(_is_designed, string="Is designed?",
type="boolean"),
'public_url': fields.function(_get_public_url,
string="Public link", type="char"),
'public_url_html': fields.function(_get_public_url_html,
string="Public link (html version)", type="char"),
'print_url': fields.function(_get_print_url,
string="Print link", type="char"),
'result_url': fields.function(_get_result_url,
string="Results link", type="char"),
'email_template_id': fields.many2one('mail.template',
'Email Template', ondelete='set null'),
'thank_you_message': fields.html('Thank you message', translate=True,
help="This message will be displayed when survey is completed"),
'quizz_mode': fields.boolean(string='Quiz mode')
}
def _default_stage(self, cr, uid, context=None):
ids = self.pool['survey.stage'].search(cr, uid, [], limit=1, context=context)
if ids:
return ids[0]
return False
_defaults = {
'color': 0,
'stage_id': lambda self, *a, **kw: self._default_stage(*a, **kw)
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
""" Read group customization in order to display all the stages in the
kanban view, even if they are empty """
stage_obj = self.pool.get('survey.stage')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
stage_ids = stage_obj._search(cr, uid, [], order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids
}
# Public methods #
def copy_data(self, cr, uid, id, default=None, context=None):
current_rec = self.read(cr, uid, id, fields=['title'], context=context)
title = _("%s (copy)") % (current_rec.get('title'))
default = dict(default or {}, title=title)
return super(survey_survey, self).copy_data(cr, uid, id, default,
context=context)
def next_page(self, cr, uid, user_input, page_id, go_back=False, context=None):
'''The next page to display to the user, knowing that page_id is the id
of the last displayed page.
If page_id == 0, it will always return the first page of the survey.
If all the pages have been displayed and go_back == False, it will
return None
If go_back == True, it will return the *previous* page instead of the
next page.
.. note::
It is assumed here that a careful user will not try to set go_back
to True if she knows that the page to display is the first one!
(doing this will probably cause a giant worm to eat her house)'''
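# Every branch below returns a (page record, page index, is-last-page flag)
# tuple; (None, -1, False) means there is no next page to display.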
survey = user_input.survey_id
pages = list(enumerate(survey.page_ids))
# First page
if page_id == 0:
return (pages[0][1], 0, len(pages) == 1)
current_page_index = pages.index((filter(lambda p: p[1].id == page_id, pages))[0])
# All the pages have been displayed
if current_page_index == len(pages) - 1 and not go_back:
return (None, -1, False)
# Let's get back, baby!
elif go_back and survey.users_can_go_back:
return (pages[current_page_index - 1][1], current_page_index - 1, False)
else:
# This will show the last page
if current_page_index == len(pages) - 2:
return (pages[current_page_index + 1][1], current_page_index + 1, True)
# This will show a regular page
else:
return (pages[current_page_index + 1][1], current_page_index + 1, False)
def filter_input_ids(self, cr, uid, survey, filters, finished=False, context=None):
'''If the user applies any filters, this function returns the list of
filtered user_input_ids (the label strings used for display are built by
get_filter_display_data).
:param filters: list of dictionaries (each having: row_id, answer_id)
:param finished: True to keep only completely filled surveys, False otherwise.
:returns list of filtered user_input_ids.
'''
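# e.g. filters = [{'row_id': 0, 'answer_id': 42}] (ids made up) keeps the inputs
# where suggested answer 42 was selected; a non-zero row_id matches either that
# matrix row or the given answer (see the domain built below).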
context = context if context else {}
if filters:
input_line_obj = self.pool.get('survey.user_input_line')
domain_filter, choice, filter_display_data = [], [], []
for filter in filters:
row_id, answer_id = filter['row_id'], filter['answer_id']
if row_id == 0:
choice.append(answer_id)
else:
domain_filter.extend(['|', ('value_suggested_row.id', '=', row_id), ('value_suggested.id', '=', answer_id)])
if choice:
domain_filter.insert(0, ('value_suggested.id', 'in', choice))
else:
domain_filter = domain_filter[1:]
line_ids = input_line_obj.search(cr, uid, domain_filter, context=context)
filtered_input_ids = [input.user_input_id.id for input in input_line_obj.browse(cr, uid, line_ids, context=context)]
else:
filtered_input_ids, filter_display_data = [], []
if finished:
user_input = self.pool.get('survey.user_input')
if not filtered_input_ids:
current_filters = user_input.search(cr, uid, [('survey_id', '=', survey.id)], context=context)
user_input_objs = user_input.browse(cr, uid, current_filters, context=context)
else:
user_input_objs = user_input.browse(cr, uid, filtered_input_ids, context=context)
return [input.id for input in user_input_objs if input.state == 'done']
return filtered_input_ids
def get_filter_display_data(self, cr, uid, filters, context):
'''Returns the data needed to display the current filters
:param filters: list of dictionaries (each having: row_id, answer_id)
:returns list of dicts with the data needed to display the filters.
'''
filter_display_data = []
if filters:
question_obj = self.pool.get('survey.question')
label_obj = self.pool.get('survey.label')
for filter in filters:
row_id, answer_id = filter['row_id'], filter['answer_id']
question_id = label_obj.browse(cr, uid, answer_id, context=context).question_id.id
question = question_obj.browse(cr, uid, question_id, context=context)
if row_id == 0:
labels = label_obj.browse(cr, uid, [answer_id], context=context)
else:
labels = label_obj.browse(cr, uid, [row_id, answer_id], context=context)
filter_display_data.append({'question_text': question.question, 'labels': [label.value for label in labels]})
return filter_display_data
def prepare_result(self, cr, uid, question, current_filters=None, context=None):
''' Compute statistical data for a question by counting the number of votes per choice, on the basis of the current filters '''
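# Shape of result_summary per question type (mirrors the branches below):
#   simple/multiple choice -> {'answers': [...], 'comments': [...]}
#   matrix -> {'answers': ..., 'rows': ..., 'result': {(row_label_id, col_label_id): count}, 'comments': [...]}
#   free_text/textbox/datetime -> [input_line, ...]
#   numerical_box -> {'input_lines': [...]} plus 'average', 'max', 'min', 'sum', 'most_common' when there are inputs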
current_filters = current_filters if current_filters else []
context = context if context else {}
result_summary = {}
#Calculate and return statistics for choice
if question.type in ['simple_choice', 'multiple_choice']:
answers = {}
comments = []
[answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids]
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):
answers[input_line.value_suggested.id]['count'] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers.values(), 'comments': comments}
#Calculate and return statistics for matrix
if question.type == 'matrix':
rows = OrderedDict()
answers = OrderedDict()
res = dict()
comments = []
[rows.update({label.id: label.value}) for label in question.labels_ids_2]
[answers.update({label.id: label.value}) for label in question.labels_ids]
for cell in product(rows.keys(), answers.keys()):
res[cell] = 0
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:
res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}
#Calculate and return statistics for free_text, textbox, datetime
if question.type in ['free_text', 'textbox', 'datetime']:
result_summary = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
result_summary.append(input_line)
#Calculate and return statistics for numerical_box
if question.type == 'numerical_box':
result_summary = {'input_lines': []}
all_inputs = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
all_inputs.append(input_line.value_number)
result_summary['input_lines'].append(input_line)
if all_inputs:
result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),
'max': round(max(all_inputs), 2),
'min': round(min(all_inputs), 2),
'sum': sum(all_inputs),
'most_common': Counter(all_inputs).most_common(5)})
return result_summary
def get_input_summary(self, cr, uid, question, current_filters=None, context=None):
''' Returns an overall summary of the question (answered, skipped, total_inputs) on the basis of the current filters '''
current_filters = current_filters if current_filters else []
context = context if context else {}
result = {}
if question.survey_id.user_input_ids:
total_input_ids = current_filters or [input_id.id for input_id in question.survey_id.user_input_ids if input_id.state != 'new']
result['total_inputs'] = len(total_input_ids)
question_input_ids = []
for user_input in question.user_input_line_ids:
if not user_input.skipped:
question_input_ids.append(user_input.user_input_id.id)
result['answered'] = len(set(question_input_ids) & set(total_input_ids))
result['skipped'] = result['total_inputs'] - result['answered']
return result
# Actions
def action_start_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey form '''
trail = ""
context = dict(context or {}, relative_url=True)
if 'survey_token' in context:
trail = "/" + context['survey_token']
return {
'type': 'ir.actions.act_url',
'name': "Start Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + trail
}
def action_send_survey(self, cr, uid, ids, context=None):
''' Open a window to compose an email, pre-filled with the survey
message '''
if not self._has_questions(cr, uid, ids, context=None):
raise UserError(_('You cannot send an invitation for a survey that has no questions.'))
survey_browse = self.pool.get('survey.survey').browse(cr, uid, ids,
context=context)[0]
if survey_browse.stage_id.closed:
raise UserError(_("You cannot send invitations for closed surveys."))
assert len(ids) == 1, 'This option should only be used for a single \
survey at a time.'
ir_model_data = self.pool.get('ir.model.data')
templates = ir_model_data.get_object_reference(cr, uid,
'survey', 'email_template_survey')
template_id = templates[1] if len(templates) > 0 else False
ctx = dict(context)
ctx.update({'default_model': 'survey.survey',
'default_res_id': ids[0],
'default_survey_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment'}
)
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': ctx,
}
def action_print_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey printable view '''
trail = ""
context = dict(context or {}, relative_url=True)
if 'survey_token' in context:
trail = "/" + context['survey_token']
return {
'type': 'ir.actions.act_url',
'name': "Print Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['print_url'], context=context)[0]['print_url'] + trail
}
def action_result_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey results view '''
context = dict(context or {}, relative_url=True)
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url']
}
def action_test_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey form into test mode'''
context = dict(context or {}, relative_url=True)
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + "/phantom"
}
class survey_page(osv.Model):
'''A page for a survey.
Pages are essentially containers, allowing questions to be grouped into
ordered screens.
.. note::
A page should be deleted if the survey it belongs to is deleted. '''
_name = 'survey.page'
_description = 'Survey Page'
_rec_name = 'title'
_order = 'sequence,id'
# Model Fields #
_columns = {
'title': fields.char('Page Title', required=1,
translate=True),
'survey_id': fields.many2one('survey.survey', 'Survey',
ondelete='cascade', required=True),
'question_ids': fields.one2many('survey.question', 'page_id',
'Questions', copy=True),
'sequence': fields.integer('Page number'),
'description': fields.html('Description',
help="An introductory text to your page", translate=True,
oldname="note"),
}
_defaults = {
'sequence': 10
}
class survey_question(osv.Model):
''' Questions that will be asked in a survey.
Each question can have one or more suggested answers (e.g. in case of
dropdown choices, multi-answer checkboxes, radio buttons...).'''
_name = 'survey.question'
_description = 'Survey Question'
_rec_name = 'question'
_order = 'sequence,id'
# Model fields #
_columns = {
# Question metadata
'page_id': fields.many2one('survey.page', 'Survey page',
ondelete='cascade', required=1),
'survey_id': fields.related('page_id', 'survey_id', type='many2one',
relation='survey.survey', string='Survey'),
'sequence': fields.integer(string='Sequence'),
# Question
'question': fields.char('Question Name', required=1, translate=True),
'description': fields.html('Description', help="Use this field to add \
additional explanations about your question", translate=True,
oldname='descriptive_text'),
# Answer
'type': fields.selection([('free_text', 'Multiple Lines Text Box'),
('textbox', 'Single Line Text Box'),
('numerical_box', 'Numerical Value'),
('datetime', 'Date and Time'),
('simple_choice', 'Multiple choice: only one answer'),
('multiple_choice', 'Multiple choice: multiple answers allowed'),
('matrix', 'Matrix')], 'Type of Question', size=15, required=1),
'matrix_subtype': fields.selection([('simple', 'One choice per row'),
('multiple', 'Multiple choices per row')], 'Matrix Type'),
'labels_ids': fields.one2many('survey.label',
'question_id', 'Types of answers', oldname='answer_choice_ids', copy=True),
'labels_ids_2': fields.one2many('survey.label',
'question_id_2', 'Rows of the Matrix', copy=True),
# labels are used for proposed choices
# if question.type == simple choice | multiple choice
# -> only labels_ids is used
# if question.type == matrix
# -> labels_ids are the columns of the matrix
# -> labels_ids_2 are the rows of the matrix
# Display options
'column_nb': fields.selection([('12', '1'),
('6', '2'),
('4', '3'),
('3', '4'),
('2', '6')],
'Number of columns'),
# These options refer to col-xx-[12|6|4|3|2] classes in Bootstrap
'display_mode': fields.selection([('columns', 'Radio Buttons'),
('dropdown', 'Selection Box')],
'Display mode'),
# Comments
'comments_allowed': fields.boolean('Show Comments Field',
oldname="allow_comment"),
'comments_message': fields.char('Comment Message', translate=True),
'comment_count_as_answer': fields.boolean('Comment Field is an Answer Choice',
oldname='make_comment_field'),
# Validation
'validation_required': fields.boolean('Validate entry',
oldname='is_validation_require'),
'validation_email': fields.boolean('Input must be an email'),
'validation_length_min': fields.integer('Minimum Text Length'),
'validation_length_max': fields.integer('Maximum Text Length'),
'validation_min_float_value': fields.float('Minimum value'),
'validation_max_float_value': fields.float('Maximum value'),
'validation_min_date': fields.datetime('Minimum Date'),
'validation_max_date': fields.datetime('Maximum Date'),
'validation_error_msg': fields.char('Error message',
oldname='validation_valid_err_msg',
translate=True),
# Constraints on number of answers (matrices)
'constr_mandatory': fields.boolean('Mandatory Answer',
oldname="is_require_answer"),
'constr_error_msg': fields.char("Error message",
oldname='req_error_msg', translate=True),
'user_input_line_ids': fields.one2many('survey.user_input_line',
'question_id', 'Answers',
domain=[('skipped', '=', False)]),
}
_defaults = {
'page_id': lambda self, cr, uid, context: context.get('page_id'),
'sequence': 10,
'type': 'free_text',
'matrix_subtype': 'simple',
'column_nb': '12',
'display_mode': 'columns',
'constr_error_msg': lambda s, cr, uid, c: _('This question requires an answer.'),
'validation_error_msg': lambda s, cr, uid, c: _('The answer you entered has an invalid format.'),
'validation_required': False,
'comments_message': lambda s, cr, uid, c: _('If other, precise:'),
}
_sql_constraints = [
('positive_len_min', 'CHECK (validation_length_min >= 0)', 'A length must be positive!'),
('positive_len_max', 'CHECK (validation_length_max >= 0)', 'A length must be positive!'),
('validation_length', 'CHECK (validation_length_min <= validation_length_max)', 'Max length cannot be smaller than min length!'),
('validation_float', 'CHECK (validation_min_float_value <= validation_max_float_value)', 'Max value cannot be smaller than min value!'),
('validation_date', 'CHECK (validation_min_date <= validation_max_date)', 'Max date cannot be smaller than min date!')
]
def onchange_validation_email(self, cr, uid, ids, validation_email, context=None):
return {'value': {'validation_required': False}} if validation_email else {}
# Validation methods
def validate_question(self, cr, uid, question, post, answer_tag, context=None):
''' Validate question, depending on question type and parameters '''
try:
checker = getattr(self, 'validate_' + question.type)
except AttributeError:
_logger.warning(question.type + ": This type of question has no validation method")
return {}
else:
return checker(cr, uid, question, post, answer_tag, context=context)
def validate_free_text(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_textbox(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Email format validation
# Note: this validation is very basic:
# all the strings of the form
# <something>@<anything>.<extension>
# will be accepted
if answer and question.validation_email:
if not re.match(r"[^@]+@[^@]+\.[^@]+", answer):
errors.update({answer_tag: _('This answer must be an email address')})
# Answer validation (if properly defined)
# Length of the answer must be in a range
if answer and question.validation_required:
if not (question.validation_length_min <= len(answer) <= question.validation_length_max):
errors.update({answer_tag: question.validation_error_msg})
return errors
def validate_numerical_box(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Checks if user input is a number
if answer:
try:
floatanswer = float(answer)
except ValueError:
errors.update({answer_tag: _('This is not a number')})
# Answer validation (if properly defined)
if answer and question.validation_required:
# Answer is not in the right range
try:
floatanswer = float(answer)  # the check that this is a float was already done above
if not (question.validation_min_float_value <= floatanswer <= question.validation_max_float_value):
errors.update({answer_tag: question.validation_error_msg})
except ValueError:
pass
return errors
def validate_datetime(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Checks if user input is a datetime
if answer:
try:
dateanswer = datetime.datetime.strptime(answer, DF)
except ValueError:
errors.update({answer_tag: _('This is not a date/time')})
return errors
# Answer validation (if properly defined)
if answer and question.validation_required:
# Answer is not in the right range
try:
dateanswer = datetime.datetime.strptime(answer, DF)
min_date = question.validation_min_date and datetime.datetime.strptime(question.validation_min_date, DF) or False
max_date = question.validation_max_date and datetime.datetime.strptime(question.validation_max_date, DF) or False
if (min_date and max_date and not(min_date <= dateanswer <= max_date)):
# If Minimum and Maximum Date are entered
errors.update({answer_tag: question.validation_error_msg})
elif (min_date and not(min_date <= dateanswer)):
# If only Minimum Date is entered and not Define Maximum Date
errors.update({answer_tag: question.validation_error_msg})
elif (max_date and not(dateanswer <= max_date)):
# If only Maximum Date is entered and not Define Minimum Date
errors.update({answer_tag: question.validation_error_msg})
except ValueError:  # the check that this is a datetime was already done above
pass
return errors
def validate_simple_choice(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.comments_allowed:
comment_tag = "%s_%s" % (answer_tag, 'comment')
# Empty answer to mandatory question
if question.constr_mandatory and answer_tag not in post:
errors.update({answer_tag: question.constr_error_msg})
if question.constr_mandatory and answer_tag in post and post[answer_tag].strip() == '':
errors.update({answer_tag: question.constr_error_msg})
# Answer is a comment and is empty
if question.constr_mandatory and answer_tag in post and post[answer_tag] == "-1" and question.comment_count_as_answer and comment_tag in post and not post[comment_tag].strip():
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_multiple_choice(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.constr_mandatory:
answer_candidates = dict_keys_startswith(post, answer_tag)
comment_flag = answer_candidates.pop(("%s_%s" % (answer_tag, -1)), None)
if question.comments_allowed:
comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# Preventing answers with blank value
if all([True if answer.strip() == '' else False for answer in answer_candidates.values()]):
errors.update({answer_tag: question.constr_error_msg})
# There is no answer neither comments (if comments count as answer)
if not answer_candidates and question.comment_count_as_answer and (not comment_flag or not comment_answer):
errors.update({answer_tag: question.constr_error_msg})
# There is no answer at all
if not answer_candidates and not question.comment_count_as_answer:
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_matrix(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.constr_mandatory:
lines_number = len(question.labels_ids_2)
answer_candidates = dict_keys_startswith(post, answer_tag)
comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# Number of lines that have been answered
if question.matrix_subtype == 'simple':
answer_number = len(answer_candidates)
elif question.matrix_subtype == 'multiple':
answer_number = len(set([sk.rsplit('_', 1)[0] for sk in answer_candidates.keys()]))
else:
raise RuntimeError("Invalid matrix subtype")
# Validate that each line has been answered
if answer_number != lines_number:
errors.update({answer_tag: question.constr_error_msg})
return errors
class survey_label(osv.Model):
''' A suggested answer for a question '''
_name = 'survey.label'
_rec_name = 'value'
_order = 'sequence,id'
_description = 'Survey Label'
def _check_question_not_empty(self, cr, uid, ids, context=None):
'''Ensure that field question_id XOR field question_id_2 is not null'''
for label in self.browse(cr, uid, ids, context=context):
# 'bool()' is required in order to make '!=' act as XOR with objects
return bool(label.question_id) != bool(label.question_id_2)
_columns = {
'question_id': fields.many2one('survey.question', 'Question',
ondelete='cascade'),
'question_id_2': fields.many2one('survey.question', 'Question',
ondelete='cascade'),
'sequence': fields.integer('Label Sequence order'),
'value': fields.char("Suggested value", translate=True,
required=True),
'quizz_mark': fields.float('Score for this choice', help="A positive score indicates a correct choice; a negative or null score indicates a wrong answer"),
}
_defaults = {
'sequence': 10,
}
_constraints = [
(_check_question_not_empty, "A label must be attached to one and only one question", ['question_id', 'question_id_2'])
]
class survey_user_input(osv.Model):
''' Metadata for a set of one user's answers to a particular survey '''
_name = "survey.user_input"
_rec_name = 'date_create'
_description = 'Survey User Input'
def _quizz_get_score(self, cr, uid, ids, name, args, context=None):
ret = dict()
for user_input in self.browse(cr, uid, ids, context=context):
ret[user_input.id] = sum([uil.quizz_mark for uil in user_input.user_input_line_ids] or [0.0])
return ret
_columns = {
'survey_id': fields.many2one('survey.survey', 'Survey', required=True,
readonly=1, ondelete='restrict'),
'date_create': fields.datetime('Creation Date', required=True,
readonly=1, copy=False),
'deadline': fields.datetime("Deadline",
help="Date by which the person can open the survey and submit answers",
oldname="date_deadline"),
'type': fields.selection([('manually', 'Manually'), ('link', 'Link')],
'Answer Type', required=1, readonly=1,
oldname="response_type"),
'state': fields.selection([('new', 'Not started yet'),
('skip', 'Partially completed'),
('done', 'Completed')],
'Status',
readonly=True),
'test_entry': fields.boolean('Test entry', readonly=1),
'token': fields.char("Identification token", readonly=1, required=1, copy=False),
# Optional Identification data
'partner_id': fields.many2one('res.partner', 'Partner', readonly=1),
'email': fields.char("E-mail", readonly=1),
# Displaying data
'last_displayed_page_id': fields.many2one('survey.page',
'Last displayed page'),
# The answers !
'user_input_line_ids': fields.one2many('survey.user_input_line',
'user_input_id', 'Answers', copy=True),
# URLs used to display the answers
'result_url': fields.related('survey_id', 'result_url', type='char',
string="Public link to the survey results"),
'print_url': fields.related('survey_id', 'print_url', type='char',
string="Public link to the empty survey"),
'quizz_score': fields.function(_quizz_get_score, type="float", string="Score for the quiz")
}
_defaults = {
'date_create': fields.datetime.now,
'type': 'manually',
'state': 'new',
'token': lambda s, cr, uid, c: uuid.uuid4().__str__(),
'quizz_score': 0.0,
}
_sql_constraints = [
('unique_token', 'UNIQUE (token)', 'A token must be unique!'),
('deadline_in_the_past', 'CHECK (deadline >= date_create)', 'The deadline cannot be earlier than the creation date')
]
def do_clean_emptys(self, cr, uid, automatic=False, context=None):
''' Remove empty user inputs that have been created manually
(used as a cronjob declared in data/survey_cron.xml) '''
empty_user_input_ids = self.search(cr, uid, [('type', '=', 'manually'),
('state', '=', 'new'),
('date_create', '<', (datetime.datetime.now() - datetime.timedelta(hours=1)).strftime(DF))],
context=context)
if empty_user_input_ids:
self.unlink(cr, uid, empty_user_input_ids, context=context)
def action_survey_resent(self, cr, uid, ids, context=None):
''' Send the invitation again '''
record = self.browse(cr, uid, ids[0], context=context)
context = dict(context or {})
context.update({
'survey_resent_token': True,
'default_partner_ids': record.partner_id and [record.partner_id.id] or [],
'default_multi_email': record.email or "",
'default_public': 'email_private',
})
return self.pool.get('survey.survey').action_send_survey(cr, uid,
[record.survey_id.id], context=context)
def action_view_answers(self, cr, uid, ids, context=None):
''' Open the website page with the survey form '''
user_input = self.read(cr, uid, ids, ['print_url', 'token'], context=context)[0]
return {
'type': 'ir.actions.act_url',
'name': "View Answers",
'target': 'self',
'url': '%s/%s' % (user_input['print_url'], user_input['token'])
}
def action_survey_results(self, cr, uid, ids, context=None):
''' Open the website page with the survey results '''
return {
'type': 'ir.actions.act_url',
'name': "Survey Results",
'target': 'self',
'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url']
}
class survey_user_input_line(osv.Model):
_name = 'survey.user_input_line'
_description = 'Survey User Input Line'
_rec_name = 'date_create'
def _answered_or_skipped(self, cr, uid, ids, context=None):
for uil in self.browse(cr, uid, ids, context=context):
# 'bool()' is required in order to make '!=' act as XOR with objects
return uil.skipped != bool(uil.answer_type)
def _check_answer_type(self, cr, uid, ids, context=None):
for uil in self.browse(cr, uid, ids, context=None):
if uil.answer_type:
if uil.answer_type == 'text':
# 'bool()' is required in order to make '!=' act as XOR with objects
return bool(uil.value_text)
elif uil.answer_type == 'number':
return (uil.value_number == 0) or (uil.value_number != False)
elif uil.answer_type == 'date':
return bool(uil.value_date)
elif uil.answer_type == 'free_text':
return bool(uil.value_free_text)
elif uil.answer_type == 'suggestion':
return bool(uil.value_suggested)
return True
_columns = {
'user_input_id': fields.many2one('survey.user_input', 'User Input',
ondelete='cascade', required=1),
'question_id': fields.many2one('survey.question', 'Question',
ondelete='restrict', required=1),
'page_id': fields.related('question_id', 'page_id', type='many2one',
relation='survey.page', string="Page"),
'survey_id': fields.related('user_input_id', 'survey_id',
type="many2one", relation="survey.survey",
string='Survey', store=True),
'date_create': fields.datetime('Create Date', required=1),
'skipped': fields.boolean('Skipped'),
'answer_type': fields.selection([('text', 'Text'),
('number', 'Number'),
('date', 'Date'),
('free_text', 'Free Text'),
('suggestion', 'Suggestion')],
'Answer Type'),
'value_text': fields.char("Text answer"),
'value_number': fields.float("Numerical answer"),
'value_date': fields.datetime("Date answer"),
'value_free_text': fields.text("Free Text answer"),
'value_suggested': fields.many2one('survey.label', "Suggested answer"),
'value_suggested_row': fields.many2one('survey.label', "Row answer"),
'quizz_mark': fields.float("Score given for this choice")
}
_defaults = {
'skipped': False,
'date_create': fields.datetime.now  # pass the function itself so the default is evaluated per record, not at import time
}
_constraints = [
(_answered_or_skipped, "A question cannot be unanswered and skipped", ['skipped', 'answer_type']),
(_check_answer_type, "The answer must be in the right type", ['answer_type', 'text', 'number', 'date', 'free_text', 'suggestion'])
]
def __get_mark(self, cr, uid, value_suggested, context=None):
try:
mark = self.pool.get('survey.label').browse(cr, uid, int(value_suggested), context=context).quizz_mark
except AttributeError:
mark = 0.0
except KeyError:
mark = 0.0
except ValueError:
mark = 0.0
return mark
def create(self, cr, uid, vals, context=None):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)})
return super(survey_user_input_line, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)})
return super(survey_user_input_line, self).write(cr, uid, ids, vals, context=context)
def save_lines(self, cr, uid, user_input_id, question, post, answer_tag,
context=None):
''' Save answers to questions, depending on question type
If an answer already exists for question and user_input_id, it will be
overwritten (in order to maintain data consistency). '''
try:
saver = getattr(self, 'save_line_' + question.type)
except AttributeError:
_logger.error(question.type + ": This type of question has no saving function")
return False
else:
saver(cr, uid, user_input_id, question, post, answer_tag, context=context)
def save_line_free_text(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False,
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'free_text', 'value_free_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_textbox(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'text', 'value_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_numerical_box(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'number', 'value_number': float(post[answer_tag])})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_datetime(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'date', 'value_date': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_simple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'suggestion', 'value_suggested': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
# '-1' indicates 'comment counts as an answer', so there is no need to record it
if post.get(answer_tag) and post.get(answer_tag) != '-1':
self.create(cr, uid, vals, context=context)
comment_answer = post.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'skipped': False, 'value_suggested': False})
self.create(cr, uid, vals, context=context)
return True
def save_line_multiple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
ca = dict_keys_startswith(post, answer_tag)
comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if len(ca) > 0:
for a in ca:
# '-1' indicates 'comment counts as an answer', so there is no need to record it
if a != ('%s_%s' % (answer_tag, '-1')):
vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a]})
self.create(cr, uid, vals, context=context)
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'value_suggested': False})
self.create(cr, uid, vals, context=context)
if not ca and not comment_answer:
vals.update({'answer_type': None, 'skipped': True})
self.create(cr, uid, vals, context=context)
return True
def save_line_matrix(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
no_answers = True
ca = dict_keys_startswith(post, answer_tag)
comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer})
self.create(cr, uid, vals, context=context)
no_answers = False
if question.matrix_subtype == 'simple':
for row in question.labels_ids_2:
a_tag = "%s_%s" % (answer_tag, row.id)
if a_tag in ca:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a_tag], 'value_suggested_row': row.id})
self.create(cr, uid, vals, context=context)
elif question.matrix_subtype == 'multiple':
for col in question.labels_ids:
for row in question.labels_ids_2:
a_tag = "%s_%s_%s" % (answer_tag, row.id, col.id)
if a_tag in ca:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': col.id, 'value_suggested_row': row.id})
self.create(cr, uid, vals, context=context)
if no_answers:
vals.update({'answer_type': None, 'skipped': True})
self.create(cr, uid, vals, context=context)
return True
def dict_keys_startswith(dictionary, string):
'''Returns a dictionary containing the elements of <dictionary> whose keys
start with <string>.
.. note::
This function uses dictionary comprehensions (Python >= 2.7)'''
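# e.g. dict_keys_startswith({'q1_a': 1, 'q2_a': 2}, 'q1') == {'q1_a': 1}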
return {k: dictionary[k] for k in filter(lambda key: key.startswith(string), dictionary.keys())}
|
agpl-3.0
|
erdincay/pyload
|
module/plugins/accounts/FileserveCom.py
|
6
|
1743
|
# -*- coding: utf-8 -*-
import time
from module.plugins.internal.Account import Account
from module.common.json_layer import json_loads
class FileserveCom(Account):
__name__ = "FileserveCom"
__type__ = "account"
__version__ = "0.22"
__status__ = "testing"
__description__ = """Fileserve.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("mkaay", "[email protected]")]
def parse_info(self, user, password, data, req):
data = self.get_data(user)
html = self.load("http://app.fileserve.com/api/login/",
post={'username': user,
'password': password,
'submit': "Submit+Query"})
res = json_loads(html)
if res['type'] == "premium":
validuntil = time.mktime(time.strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
return {'trafficleft': res['traffic'], 'validuntil': validuntil}
else:
return {'premium': False, 'trafficleft': None, 'validuntil': None}
def login(self, user, password, data, req):
html = self.load("http://app.fileserve.com/api/login/",
post={'username': user,
'password': password,
'submit' : "Submit+Query"})
res = json_loads(html)
if not res['type']:
self.login_fail()
#: Log in on the fileserve html page
self.load("http://www.fileserve.com/login.php",
post={'loginUserName' : user,
'loginUserPassword': password,
'autoLogin' : "checked",
'loginFormSubmit' : "Login"})
|
gpl-3.0
|
nishad89/newfies-dialer
|
newfies/appointment/admin_filters.py
|
4
|
1585
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext as _
from appointment.function_def import manager_list_of_calendar_user
from appointment.models.users import CalendarUserProfile
class ManagerFilter(SimpleListFilter):
title = _('manager')
parameter_name = 'manager'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return manager_list_of_calendar_user()
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() is not None:
calendar_user_id_list = CalendarUserProfile.objects.values_list('user_id', flat=True).filter(manager_id=self.value())
return queryset.filter(id__in=calendar_user_id_list)
else:
return queryset
|
mpl-2.0
|
nedn/pybitcoin
|
third_party/python-ecdsa/src/ecdsa/ellipticcurve.py
|
1
|
5095
|
#! /usr/bin/env python
#
# Implementation of elliptic curves, for cryptographic applications.
#
# This module doesn't provide any way to choose a random elliptic
# curve, nor to verify that an elliptic curve was chosen randomly,
# because one can simply use NIST's standard curves.
#
# Notes from X9.62-1998 (draft):
# Nomenclature:
# - Q is a public key.
# The "Elliptic Curve Domain Parameters" include:
# - q is the "field size", which in our case equals p.
# - p is a big prime.
# - G is a point of prime order (5.1.1.1).
# - n is the order of G (5.1.1.1).
# Public-key validation (5.2.2):
# - Verify that Q is not the point at infinity.
# - Verify that X_Q and Y_Q are in [0,p-1].
# - Verify that Q is on the curve.
# - Verify that nQ is the point at infinity.
# Signature generation (5.3):
# - Pick random k from [1,n-1].
# Signature checking (5.4.2):
# - Verify that r and s are in [1,n-1].
#
# Version of 2008.11.25.
#
# Revision history:
# 2005.12.31 - Initial version.
# 2008.11.25 - Change CurveFp.is_on to contains_point.
#
# Written in 2005 by Peter Pearson and placed in the public domain.
from __future__ import division
from six import python_2_unicode_compatible
from . import numbertheory
@python_2_unicode_compatible
class CurveFp(object):
"""Elliptic Curve over the field of integers modulo a prime."""
def __init__(self, p, a, b):
"""The curve of points satisfying y^2 = x^3 + a*x + b (mod p)."""
self.__p = p
self.__a = a
self.__b = b
def p(self):
return self.__p
def a(self):
return self.__a
def b(self):
return self.__b
def contains_point(self, x, y):
"""Is the point (x,y) on this curve?"""
return (y * y - (x * x * x + self.__a * x + self.__b)) % self.__p == 0
def __str__(self):
return "CurveFp(p=%d, a=%d, b=%d)" % (self.__p, self.__a, self.__b)
class Point(object):
"""A point on an elliptic curve. Altering x and y is forbidding,
but they can be read by the x() and y() methods."""
def __init__(self, curve, x, y, order=None):
"""curve, x, y, order; order (optional) is the order of this point."""
self.__curve = curve
self.__x = x
self.__y = y
self.__order = order
# self.curve is allowed to be None only for INFINITY:
if self.__curve:
assert self.__curve.contains_point(x, y)
if order:
assert self * order == INFINITY
def __eq__(self, other):
"""Return True if the points are identical, False otherwise."""
if self.__curve == other.__curve \
and self.__x == other.__x \
and self.__y == other.__y:
return True
else:
return False
def __add__(self, other):
"""Add one point to another point."""
# X9.62 B.3:
if other == INFINITY:
return self
if self == INFINITY:
return other
assert self.__curve == other.__curve
if self.__x == other.__x:
if (self.__y + other.__y) % self.__curve.p() == 0:
return INFINITY
else:
return self.double()
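# Chord rule for distinct points: l = (y2 - y1) / (x2 - x1) mod p,
# x3 = l**2 - x1 - x2, y3 = l*(x1 - x3) - y1 (all mod p), as computed below.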
p = self.__curve.p()
l = ((other.__y - self.__y) * \
numbertheory.inverse_mod(other.__x - self.__x, p)) % p
x3 = (l * l - self.__x - other.__x) % p
y3 = (l * (self.__x - x3) - self.__y) % p
return Point(self.__curve, x3, y3)
def __mul__(self, other):
"""Multiply a point by an integer."""
def leftmost_bit(x):
assert x > 0
result = 1
while result <= x:
result = 2 * result
return result // 2
e = other
if self.__order:
e = e % self.__order
if e == 0:
return INFINITY
if self == INFINITY:
return INFINITY
assert e > 0
# From X9.62 D.3.2:
e3 = 3 * e
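# Comparing the bits of e and 3*e walks a signed-digit recoding of e: where a
# bit is set in 3*e but not in e the base point is added, where it is set in e
# but not in 3*e its negative is added (see the loop below).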
negative_self = Point(self.__curve, self.__x, -self.__y, self.__order)
i = leftmost_bit(e3) // 2
result = self
# print_("Multiplying %s by %d (e3 = %d):" % (self, other, e3))
while i > 1:
result = result.double()
if (e3 & i) != 0 and (e & i) == 0:
result = result + self
if (e3 & i) == 0 and (e & i) != 0:
result = result + negative_self
# print_(". . . i = %d, result = %s" % ( i, result ))
i = i // 2
return result
def __rmul__(self, other):
"""Multiply a point by an integer."""
return self * other
def __str__(self):
if self == INFINITY:
return "infinity"
return "(%d,%d)" % (self.__x, self.__y)
def double(self):
"""Return a new point that is twice the old."""
if self == INFINITY:
return INFINITY
# X9.62 B.3:
p = self.__curve.p()
a = self.__curve.a()
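        # l is the slope of the tangent to the curve at this point, obtained
        # by implicitly differentiating y^2 = x^3 + a*x + b (mod p).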
l = ((3 * self.__x * self.__x + a) * \
numbertheory.inverse_mod(2 * self.__y, p)) % p
x3 = (l * l - 2 * self.__x) % p
y3 = (l * (self.__x - x3) - self.__y) % p
return Point(self.__curve, x3, y3)
def x(self):
return self.__x
def y(self):
return self.__y
def curve(self):
return self.__curve
def order(self):
return self.__order
# This one point is the Point At Infinity for all purposes:
INFINITY = Point(None, None, None)
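# --- Minimal usage sketch (not part of the original module) -----------------
# The toy parameters below (p=23, a=1, b=1, points (3,10) and (9,7)) are
# illustrative only and far too small for cryptographic use. Because of the
# relative import above, run this as a module, e.g. `python -m ecdsa.ellipticcurve`
# (module path assumed from this file's location).
if __name__ == "__main__":
    toy_curve = CurveFp(23, 1, 1)               # y^2 = x^3 + x + 1 (mod 23)
    p1 = Point(toy_curve, 3, 10)
    p2 = Point(toy_curve, 9, 7)
    assert toy_curve.contains_point(3, 10)
    assert p1 + p2 == Point(toy_curve, 17, 20)  # chord-and-tangent addition
    assert p1 * 2 == p1.double()                # scalar multiplication
    assert p1 + INFINITY == p1                  # identity element
    print("toy curve checks passed")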
|
mit
|
huchoi/edx-platform
|
cms/djangoapps/course_creators/tests/test_admin.py
|
82
|
8130
|
"""
Tests course_creators.admin.py.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
import mock
from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from django.core import mail
from student.roles import CourseCreatorRole
from student import auth
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, context))
class CourseCreatorAdminTest(TestCase):
"""
Tests for course creator admin.
"""
def setUp(self):
""" Test case setup """
self.user = User.objects.create_user('test_user', '[email protected]', 'foo')
self.table_entry = CourseCreator(user=self.user)
self.table_entry.save()
self.admin = User.objects.create_user('Mark', '[email protected]', 'foo')
self.admin.is_staff = True
self.request = HttpRequest()
self.request.user = self.admin
self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())
self.studio_request_email = '[email protected]'
self.enable_creator_group_patch = {
"ENABLE_CREATOR_GROUP": True,
"STUDIO_REQUEST_EMAIL": self.studio_request_email
}
@mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
@mock.patch('django.contrib.auth.models.User.email_user')
def test_change_status(self, email_user):
"""
Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
"""
def change_state_and_verify_email(state, is_creator):
""" Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
self._change_state(state)
self.assertEqual(is_creator, auth.has_access(self.user, CourseCreatorRole()))
context = {'studio_request_email': self.studio_request_email}
if state == CourseCreator.GRANTED:
template = 'emails/course_creator_granted.txt'
elif state == CourseCreator.DENIED:
template = 'emails/course_creator_denied.txt'
else:
template = 'emails/course_creator_revoked.txt'
email_user.assert_called_with(
mock_render_to_string('emails/course_creator_subject.txt', context),
mock_render_to_string(template, context),
self.studio_request_email
)
with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
# User is initially unrequested.
self.assertFalse(auth.has_access(self.user, CourseCreatorRole()))
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.DENIED, False)
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.PENDING, False)
change_state_and_verify_email(CourseCreator.GRANTED, True)
change_state_and_verify_email(CourseCreator.UNREQUESTED, False)
change_state_and_verify_email(CourseCreator.DENIED, False)
@mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
def test_mail_admin_on_pending(self):
"""
Tests that the admin account is notified when a user is in the 'pending' state.
"""
def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
""" Changes user state and verifies e-mail sent to admin address only when pending. """
mail.outbox = []
self._change_state(state)
# If a message is sent to the user about course creator status change, it will be the first
# message sent. Admin message will follow.
base_num_emails = 1 if expect_sent_to_user else 0
if expect_sent_to_admin:
context = {'user_name': "test_user", 'user_email': '[email protected]'}
self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
sent_mail = mail.outbox[base_num_emails]
self.assertEquals(
mock_render_to_string('emails/course_creator_admin_subject.txt', context),
sent_mail.subject
)
self.assertEquals(
mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
sent_mail.body
)
self.assertEquals(self.studio_request_email, sent_mail.from_email)
self.assertEqual([self.studio_request_email], sent_mail.to)
else:
self.assertEquals(base_num_emails, len(mail.outbox))
with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
# E-mail message should be sent to admin only when new state is PENDING, regardless of what
# previous state was (unless previous state was already PENDING).
# E-mail message sent to user only on transition into and out of GRANTED state.
check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
def _change_state(self, state):
""" Helper method for changing state """
self.table_entry.state = state
self.creator_admin.save_model(self.request, self.table_entry, None, True)
def test_add_permission(self):
"""
Tests that staff cannot add entries
"""
self.assertFalse(self.creator_admin.has_add_permission(self.request))
def test_delete_permission(self):
"""
Tests that staff cannot delete entries
"""
self.assertFalse(self.creator_admin.has_delete_permission(self.request))
def test_change_permission(self):
"""
Tests that only staff can change entries
"""
self.assertTrue(self.creator_admin.has_change_permission(self.request))
self.request.user = self.user
self.assertFalse(self.creator_admin.has_change_permission(self.request))
def test_rate_limit_login(self):
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
post_params = {'username': self.user.username, 'password': 'wrong_password'}
            # try logging in 30 times, the default limit on the number of failed
            # login attempts allowed in one 5-minute period before the rate limit applies
for _ in xrange(30):
response = self.client.post('/admin/', post_params)
self.assertEquals(response.status_code, 200)
response = self.client.post('/admin/', post_params)
# Since we are using the default rate limit behavior, we are
# expecting this to return a 403 error to indicate that there have
# been too many attempts
self.assertEquals(response.status_code, 403)
|
agpl-3.0
|
klunwebale/odoo
|
addons/auth_openid/controllers/main.py
|
382
|
10399
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
"""Return system user info string, such as USERNAME-EUID"""
try:
info = getpass.getuser()
except ImportError:
if os.name == 'nt':
                # when there is no 'USERNAME' in the environment, getpass.getuser()
                # fails when trying to import the 'pwd' module - which is unix only.
                # In that case we have to fall back to the real win32 API.
import win32api
info = win32api.GetUserName()
else:
raise
    euid = getattr(os, 'geteuid', None)  # Not available on some platforms
if euid is not None:
info = '%s-%d' % (info, euid())
return info
_storedir = os.path.join(tempfile.gettempdir(),
'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
def complete(self, message, endpoint, return_to):
if message.getOpenIDNamespace() == consumer.OPENID2_NS:
server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
if server_url.startswith('https://www.google.com/a/'):
assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
assoc = self.store.getAssociation(server_url, assoc_handle)
if assoc:
# update fields
for attr in ['claimed_id', 'identity']:
value = message.getArg(consumer.OPENID2_NS, attr, '')
value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.url_quote_plus(value)
message.setArg(consumer.OPENID2_NS, attr, value)
# now, resign the message
message.delArg(consumer.OPENID2_NS, 'sig')
message.delArg(consumer.OPENID2_NS, 'signed')
message = assoc.signMessage(message)
return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
class OpenIDController(http.Controller):
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, oidrequest):
"""Add extensions to the oidrequest"""
sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
optional=self._OPTIONAL_ATTRIBUTES)
oidrequest.addExtension(sreg_request)
ax_request = ax.FetchRequest()
for alias in self._REQUIRED_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
for alias in self._OPTIONAL_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
oidrequest.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
attrs = {}
all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
if sreg_resp:
for attr in all_attrs:
value = sreg_resp.get(attr)
if value is not None:
attrs[attr] = value
ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
if ax_resp:
for attr in all_attrs:
value = ax_resp.getSingle(utils.SREG2AX[attr])
if value is not None:
attrs[attr] = value
return attrs
def _get_realm(self):
return request.httprequest.host_url
@http.route('/auth_openid/login/verify_direct', type='http', auth='none')
def verify_direct(self, db, url):
result = self._verify(db, url)
if 'error' in result:
return werkzeug.exceptions.BadRequest(result['error'])
if result['action'] == 'redirect':
return werkzeug.utils.redirect(result['value'])
return result['value']
@http.route('/auth_openid/login/verify', type='json', auth='none')
def verify(self, db, url):
return self._verify(db, url)
def _verify(self, db, url):
redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
realm = self._get_realm()
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
oidrequest = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if oidrequest is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
request.session.openid_session = session
self._add_extensions(oidrequest)
if oidrequest.shouldSendRedirect():
redirect_url = oidrequest.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
else:
form_html = oidrequest.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': request.session_id}
@http.route('/auth_openid/login/process', type='http', auth='none')
def process(self, **kw):
session = getattr(request.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect('/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = request.httprequest.args
info = oidconsumer.complete(query, request.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(dbname, login, key)
session['message'] = 'This OpenID identifier is not associated to any active users'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect('/#action=login&loginerror=1')
@http.route('/auth_openid/login/status', type='json', auth='none')
def status(self):
session = getattr(request.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
drwyrm/Flexget
|
flexget/plugins/sites/archetorrent.py
|
1
|
1084
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger("archetorrent")
class UrlRewriteArchetorrent(object):
"""Archetorrent urlrewriter."""
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
return url.startswith('https://www.archetorrent.com') and url.find('download') == -1
# urlrewriter API
def url_rewrite(self, task, entry):
if 'url' not in entry:
log.error("Didn't actually get a URL...")
else:
log.debug("Got the URL: %s" % entry['url'])
entry['url'] = entry['url'].replace('torrents-details', 'download')
entry['url'] = entry['url'].replace('&hit=1', '')
log.debug("New URL: %s" % entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteArchetorrent, 'archetorrent', groups=['urlrewriter'], api_ver=2)
|
mit
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/nose/suite.py
|
23
|
22234
|
"""
Test Suites
-----------
Provides a LazySuite, which is a suite whose test list is a generator
function, and ContextSuite, which can run fixtures (setup/teardown
functions or methods) for the context that contains its tests.
"""
from __future__ import generators
import logging
import sys
import unittest
from nose.case import Test
from nose.config import Config
from nose.proxy import ResultProxyFactory
from nose.util import isclass, resolve_name, try_run
if sys.platform == 'cli':
if sys.version_info[:2] < (2, 6):
import clr
clr.AddReference("IronPython")
from IronPython.Runtime.Exceptions import StringException
else:
class StringException(Exception):
pass
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# Singleton for default value -- see ContextSuite.__init__ below
_def = object()
def _strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
class MixedContextError(Exception):
"""Error raised when a context suite sees tests from more than
one context.
"""
pass
class LazySuite(unittest.TestSuite):
"""A suite that may use a generator as its list of tests
"""
def __init__(self, tests=()):
"""Initialize the suite. tests may be an iterable or a generator
"""
self._set_tests(tests)
def __iter__(self):
return iter(self._tests)
def __repr__(self):
return "<%s tests=generator (%s)>" % (
_strclass(self.__class__), id(self))
def __hash__(self):
return object.__hash__(self)
__str__ = __repr__
def addTest(self, test):
self._precache.append(test)
# added to bypass run changes in 2.7's unittest
def run(self, result):
for test in self._tests:
if result.shouldStop:
break
test(result)
return result
def __nonzero__(self):
log.debug("tests in %s?", id(self))
if self._precache:
return True
if self.test_generator is None:
return False
try:
test = self.test_generator.next()
if test is not None:
self._precache.append(test)
return True
except StopIteration:
pass
return False
def _get_tests(self):
log.debug("precache is %s", self._precache)
for test in self._precache:
yield test
if self.test_generator is None:
return
for test in self.test_generator:
yield test
def _set_tests(self, tests):
self._precache = []
is_suite = isinstance(tests, unittest.TestSuite)
if callable(tests) and not is_suite:
self.test_generator = tests()
elif is_suite:
# Suites need special treatment: they must be called like
# tests for their setup/teardown to run (if any)
self.addTests([tests])
self.test_generator = None
else:
self.addTests(tests)
self.test_generator = None
_tests = property(_get_tests, _set_tests, None,
"Access the tests in this suite. Access is through a "
"generator, so iteration may not be repeatable.")
class ContextSuite(LazySuite):
"""A suite with context.
A ContextSuite executes fixtures (setup and teardown functions or
methods) for the context containing its tests.
The context may be explicitly passed. If it is not, a context (or
nested set of contexts) will be constructed by examining the tests
in the suite.
"""
failureException = unittest.TestCase.failureException
was_setup = False
was_torndown = False
classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
'setUpClass', 'setUpAll')
classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
'teardownAll', 'tearDownClass', 'tearDownAll')
moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
'setUp')
moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
'teardown', 'tearDown')
packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
packageTeardown = ('teardown_package', 'teardownPackage',
'tearDownPackage')
def __init__(self, tests=(), context=None, factory=None,
config=None, resultProxy=None, can_split=True):
log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
self.context = context
self.factory = factory
if config is None:
config = Config()
self.config = config
self.resultProxy = resultProxy
self.has_run = False
self.can_split = can_split
self.error_context = None
LazySuite.__init__(self, tests)
def __repr__(self):
return "<%s context=%s>" % (
_strclass(self.__class__),
getattr(self.context, '__name__', self.context))
__str__ = __repr__
def id(self):
if self.error_context:
return '%s:%s' % (repr(self), self.error_context)
else:
return repr(self)
def __hash__(self):
return object.__hash__(self)
# 2.3 compat -- force 2.4 call sequence
def __call__(self, *arg, **kw):
return self.run(*arg, **kw)
def exc_info(self):
"""Hook for replacing error tuple output
"""
return sys.exc_info()
def _exc_info(self):
"""Bottleneck to fix up IronPython string exceptions
"""
e = self.exc_info()
if sys.platform == 'cli':
if isinstance(e[0], StringException):
# IronPython throws these StringExceptions, but
# traceback checks type(etype) == str. Make a real
# string here.
e = (str(e[0]), e[1], e[2])
return e
def run(self, result):
"""Run tests in suite inside of suite fixtures.
"""
# proxy the result for myself
log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
#import pdb
#pdb.set_trace()
if self.resultProxy:
result, orig = self.resultProxy(result, self), result
else:
result, orig = result, result
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
self.error_context = 'setup'
result.addError(self, self._exc_info())
return
try:
for test in self._tests:
if result.shouldStop:
log.debug("stopping")
break
# each nose.case.Test will create its own result proxy
# so the cases need the original result, to avoid proxy
# chains
test(orig)
finally:
self.has_run = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
self.error_context = 'teardown'
result.addError(self, self._exc_info())
def hasFixtures(self, ctx_callback=None):
context = self.context
if context is None:
return False
if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
return True
# My context doesn't have any, but its ancestors might
factory = self.factory
if factory:
ancestors = factory.context.get(self, [])
for ancestor in ancestors:
if self.implementsAnyFixture(
ancestor, ctx_callback=ctx_callback):
return True
return False
def implementsAnyFixture(self, context, ctx_callback):
if isclass(context):
names = self.classSetup + self.classTeardown
else:
names = self.moduleSetup + self.moduleTeardown
if hasattr(context, '__path__'):
names += self.packageSetup + self.packageTeardown
# If my context has any fixture attribute, I have fixtures
fixt = False
for m in names:
if hasattr(context, m):
fixt = True
break
if ctx_callback is None:
return fixt
return ctx_callback(context, fixt)
def setUp(self):
log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
if not self:
# I have no tests
log.debug("suite %s has no tests", id(self))
return
if self.was_setup:
log.debug("suite %s already set up", id(self))
return
context = self.context
if context is None:
return
# before running my own context's setup, I need to
# ask the factory if my context's contexts' setups have been run
factory = self.factory
if factory:
# get a copy, since we'll be destroying it as we go
ancestors = factory.context.get(self, [])[:]
while ancestors:
ancestor = ancestors.pop()
log.debug("ancestor %s may need setup", ancestor)
if ancestor in factory.was_setup:
continue
log.debug("ancestor %s does need setup", ancestor)
self.setupContext(ancestor)
if not context in factory.was_setup:
self.setupContext(context)
else:
self.setupContext(context)
self.was_setup = True
log.debug("completed suite setup")
def setupContext(self, context):
self.config.plugins.startContext(context)
log.debug("%s setup context %s", self, context)
if self.factory:
if context in self.factory.was_setup:
return
# note that I ran the setup for this context, so that I'll run
# the teardown in my teardown
self.factory.was_setup[context] = self
if isclass(context):
names = self.classSetup
else:
names = self.moduleSetup
if hasattr(context, '__path__'):
names = self.packageSetup + names
try_run(context, names)
def shortDescription(self):
if self.context is None:
return "test suite"
return "test suite for %s" % self.context
def tearDown(self):
log.debug('context teardown')
if not self.was_setup or self.was_torndown:
log.debug(
"No reason to teardown (was_setup? %s was_torndown? %s)"
% (self.was_setup, self.was_torndown))
return
self.was_torndown = True
context = self.context
if context is None:
log.debug("No context to tear down")
return
# for each ancestor... if the ancestor was setup
# and I did the setup, I can do teardown
factory = self.factory
if factory:
ancestors = factory.context.get(self, []) + [context]
for ancestor in ancestors:
log.debug('ancestor %s may need teardown', ancestor)
if not ancestor in factory.was_setup:
log.debug('ancestor %s was not setup', ancestor)
continue
if ancestor in factory.was_torndown:
log.debug('ancestor %s already torn down', ancestor)
continue
setup = factory.was_setup[ancestor]
log.debug("%s setup ancestor %s", setup, ancestor)
if setup is self:
self.teardownContext(ancestor)
else:
self.teardownContext(context)
def teardownContext(self, context):
log.debug("%s teardown context %s", self, context)
if self.factory:
if context in self.factory.was_torndown:
return
self.factory.was_torndown[context] = self
if isclass(context):
names = self.classTeardown
else:
names = self.moduleTeardown
if hasattr(context, '__path__'):
names = self.packageTeardown + names
try_run(context, names)
self.config.plugins.stopContext(context)
# FIXME the wrapping has to move to the factory?
def _get_wrapped_tests(self):
for test in self._get_tests():
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
yield test
else:
yield Test(test,
config=self.config,
resultProxy=self.resultProxy)
_tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
"Access the tests in this suite. Tests are returned "
"inside of a context wrapper.")
class ContextSuiteFactory(object):
"""Factory for ContextSuites. Called with a collection of tests,
the factory decides on a hierarchy of contexts by introspecting
the collection or the tests themselves to find the objects
containing the test objects. It always returns one suite, but that
suite may consist of a hierarchy of nested suites.
"""
suiteClass = ContextSuite
def __init__(self, config=None, suiteClass=None, resultProxy=_def):
if config is None:
config = Config()
self.config = config
if suiteClass is not None:
self.suiteClass = suiteClass
# Using a singleton to represent default instead of None allows
# passing resultProxy=None to turn proxying off.
if resultProxy is _def:
resultProxy = ResultProxyFactory(config=config)
self.resultProxy = resultProxy
self.suites = {}
self.context = {}
self.was_setup = {}
self.was_torndown = {}
def __call__(self, tests, **kw):
"""Return ``ContextSuite`` for tests. ``tests`` may either
be a callable (in which case the resulting ContextSuite will
have no parent context and be evaluated lazily) or an
        iterable. In that case the tests will be wrapped in
        nose.case.Test, examined to find the context of each, and a
suite of suites returned, organized into a stack with the
outermost suites belonging to the outermost contexts.
"""
log.debug("Create suite for %s", tests)
context = kw.pop('context', getattr(tests, 'context', None))
log.debug("tests %s context %s", tests, context)
if context is None:
tests = self.wrapTests(tests)
try:
context = self.findContext(tests)
except MixedContextError:
return self.makeSuite(self.mixedSuites(tests), None, **kw)
return self.makeSuite(tests, context, **kw)
def ancestry(self, context):
"""Return the ancestry of the context (that is, all of the
packages and modules containing the context), in order of
descent with the outermost ancestor last.
This method is a generator.
"""
log.debug("get ancestry %s", context)
if context is None:
return
        # Methods include a reference to the module they are defined in; we
        # don't want that - we want the module the class is in now
# (classes are re-ancestored elsewhere).
if hasattr(context, 'im_class'):
context = context.im_class
elif hasattr(context, '__self__'):
context = context.__self__.__class__
if hasattr(context, '__module__'):
ancestors = context.__module__.split('.')
elif hasattr(context, '__name__'):
ancestors = context.__name__.split('.')[:-1]
else:
raise TypeError("%s has no ancestors?" % context)
while ancestors:
log.debug(" %s ancestors %s", context, ancestors)
yield resolve_name('.'.join(ancestors))
ancestors.pop()
def findContext(self, tests):
if callable(tests) or isinstance(tests, unittest.TestSuite):
return None
context = None
for test in tests:
# Don't look at suites for contexts, only tests
ctx = getattr(test, 'context', None)
if ctx is None:
continue
if context is None:
context = ctx
elif context != ctx:
raise MixedContextError(
"Tests with different contexts in same suite! %s != %s"
% (context, ctx))
return context
def makeSuite(self, tests, context, **kw):
suite = self.suiteClass(
tests, context=context, config=self.config, factory=self,
resultProxy=self.resultProxy, **kw)
if context is not None:
self.suites.setdefault(context, []).append(suite)
self.context.setdefault(suite, []).append(context)
log.debug("suite %s has context %s", suite,
getattr(context, '__name__', None))
for ancestor in self.ancestry(context):
self.suites.setdefault(ancestor, []).append(suite)
self.context[suite].append(ancestor)
log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
return suite
def mixedSuites(self, tests):
"""The complex case where there are tests that don't all share
the same context. Groups tests into suites with common ancestors,
according to the following (essentially tail-recursive) procedure:
Starting with the context of the first test, if it is not
None, look for tests in the remaining tests that share that
ancestor. If any are found, group into a suite with that
ancestor as the context, and replace the current suite with
that suite. Continue this process for each ancestor of the
first test, until all ancestors have been processed. At this
point if any tests remain, recurse with those tests as the
input, returning a list of the common suite (which may be the
suite or test we started with, if no common tests were found)
plus the results of recursion.
"""
if not tests:
return []
head = tests.pop(0)
if not tests:
return [head] # short circuit when none are left to combine
suite = head # the common ancestry suite, so far
tail = tests[:]
context = getattr(head, 'context', None)
if context is not None:
ancestors = [context] + [a for a in self.ancestry(context)]
for ancestor in ancestors:
common = [suite] # tests with ancestor in common, so far
remain = [] # tests that remain to be processed
for test in tail:
found_common = False
test_ctx = getattr(test, 'context', None)
if test_ctx is None:
remain.append(test)
continue
if test_ctx is ancestor:
common.append(test)
continue
for test_ancestor in self.ancestry(test_ctx):
if test_ancestor is ancestor:
common.append(test)
found_common = True
break
if not found_common:
remain.append(test)
if common:
suite = self.makeSuite(common, ancestor)
tail = self.mixedSuites(remain)
return [suite] + tail
def wrapTests(self, tests):
log.debug("wrap %s", tests)
if callable(tests) or isinstance(tests, unittest.TestSuite):
log.debug("I won't wrap")
return tests
wrapped = []
for test in tests:
log.debug("wrapping %s", test)
if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
wrapped.append(test)
elif isinstance(test, ContextList):
wrapped.append(self.makeSuite(test, context=test.context))
else:
wrapped.append(
Test(test, config=self.config, resultProxy=self.resultProxy)
)
return wrapped
class ContextList(object):
"""Not quite a suite -- a group of tests in a context. This is used
to hint the ContextSuiteFactory about what context the tests
belong to, in cases where it may be ambiguous or missing.
"""
def __init__(self, tests, context=None):
self.tests = tests
self.context = context
def __iter__(self):
return iter(self.tests)
class FinalizingSuiteWrapper(unittest.TestSuite):
"""Wraps suite and calls final function after suite has
executed. Used to call final functions in cases (like running in
the standard test runner) where test running is not under nose's
control.
"""
def __init__(self, suite, finalize):
self.suite = suite
self.finalize = finalize
def __call__(self, *arg, **kw):
return self.run(*arg, **kw)
# 2.7 compat
def __iter__(self):
return iter(self.suite)
def run(self, *arg, **kw):
try:
return self.suite(*arg, **kw)
finally:
self.finalize(*arg, **kw)
# backwards compat -- sort of
class TestDir:
def __init__(*arg, **kw):
raise NotImplementedError(
"TestDir is not usable with nose 0.10. The class is present "
"in nose.suite for backwards compatibility purposes but it "
"may not be used.")
class TestModule:
def __init__(*arg, **kw):
raise NotImplementedError(
"TestModule is not usable with nose 0.10. The class is present "
"in nose.suite for backwards compatibility purposes but it "
"may not be used.")
|
lgpl-3.0
|
xlsdg/phantomjs-linux-armv7l
|
src/breakpad/src/tools/gyp/test/library/gyptest-shared.py
|
430
|
2230
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple build of a "Hello, world!" program with shared libraries,
including verifying that libraries are rebuilt correctly when functions
move between libraries.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('library.gyp',
'-Dlibrary=shared_library',
'-Dmoveable_function=lib1',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=shared_library',
'-Dmoveable_function=lib2',
chdir='relocate/src')
# Update program.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('Hello', 'Hello again')
test.write('relocate/src/program.c', contents)
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib2_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=shared_library',
'-Dmoveable_function=lib1',
chdir='relocate/src')
# Update program.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('again', 'again again')
test.write('relocate/src/program.c', contents)
# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
# the "moved" module. This should be done in gyp by adding a dependency
# on the generated .vcproj file itself.
test.touch('relocate/src/lib2.c')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
|
bsd-3-clause
|
swdream/neutron
|
neutron/db/migration/alembic_migrations/versions/35a0f3365720_add_port_security_in_ml2.py
|
47
|
2014
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add port-security in ml2
Revision ID: 35a0f3365720
Revises: 341ee8a4ccb5
Create Date: 2014-09-30 09:41:14.146519
"""
# revision identifiers, used by Alembic.
revision = '35a0f3365720'
down_revision = '341ee8a4ccb5'
from alembic import op
def upgrade():
context = op.get_context()
if context.bind.dialect.name == 'ibm_db_sa':
# NOTE(junxie): DB2 stores booleans as 0 and 1.
op.execute('INSERT INTO networksecuritybindings (network_id, '
'port_security_enabled) SELECT id, 1 FROM networks '
'WHERE id NOT IN (SELECT network_id FROM '
'networksecuritybindings);')
op.execute('INSERT INTO portsecuritybindings (port_id, '
'port_security_enabled) SELECT id, 1 FROM ports '
'WHERE id NOT IN (SELECT port_id FROM '
'portsecuritybindings);')
else:
op.execute('INSERT INTO networksecuritybindings (network_id, '
'port_security_enabled) SELECT id, True FROM networks '
'WHERE id NOT IN (SELECT network_id FROM '
'networksecuritybindings);')
op.execute('INSERT INTO portsecuritybindings (port_id, '
'port_security_enabled) SELECT id, True FROM ports '
'WHERE id NOT IN (SELECT port_id FROM '
'portsecuritybindings);')
|
apache-2.0
|
havt/odoo
|
addons/share/wizard/share_wizard.py
|
182
|
51223
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
return ''.join(random.SystemRandom().sample(RANDOM_PASS_CHARACTERS,10))
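# e.g. generate_random_pass() -> 'mKr3Tp9eHw' (illustrative output only; each
# call samples 10 characters from RANDOM_PASS_CHARACTERS).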
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='share', group_xml_id='group_share_user', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
        # NOTE: takes _ids as a parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
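    # Example of the template produced above, assuming a default web.base.url
    # and context={'share_url_template_hash_arguments': ['action']}:
    #   http://localhost:8069/login?db=%(dbname)s&login=%(login)s&key=%(password)s#action=%(action)s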
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', size=512, type='char', readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool[model].read(cr, uid, [res_id], context=context)[0]
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
values = {'name': share_group_name, 'share': True}
try:
implied_group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'group_shared')[1]
except ValueError:
implied_group_id = None
if implied_group_id:
values['implied_ids'] = [(4, implied_group_id)]
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, values, {'noadmin': True})
def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
"""Create one new res.users record for each email address provided in
wizard_data.new_users, ignoring already existing users.
Populates wizard_data.result_line_ids with one new line for
each user (existing or not). New users will also have a value
for the password field, so they can receive it by email.
Returns the ids of the created users, and the ids of the
ignored, existing ones."""
context = dict(context or {})
user_obj = self.pool.get('res.users')
current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
# modify context to disable shortcuts when creating share users
context['noshortcut'] = True
context['no_reset_password'] = True
created_ids = []
existing_ids = []
if wizard_data.user_type == 'emails':
# get new user list from email data
new_users = (wizard_data.new_users or '').split('\n')
new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
for new_user in new_users:
# Ignore blank lines
new_user = new_user.strip()
if not new_user: continue
# Ignore the user if it already exists.
if not wizard_data.invite:
existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
else:
existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
existing_ids.extend(existing)
if existing:
new_line = { 'user_id': existing[0],
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
continue
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_user,
'password': new_pass,
'name': new_user,
'email': new_user,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
elif wizard_data.user_type == 'embedded':
new_login = 'embedded-%s' % (uuid.uuid4().hex,)
new_pass = generate_random_pass()
user_id = user_obj.create(cr, UID_ROOT, {
'login': new_login,
'password': new_pass,
'name': new_login,
'groups_id': [(6,0,[group_id])],
'company_id': current_user.company_id.id,
'company_ids': [(6, 0, [current_user.company_id.id])],
}, context)
new_line = { 'user_id': user_id,
'password': new_pass,
'newly_created': True}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
created_ids.append(user_id)
return created_ids, existing_ids
def _create_action(self, cr, uid, values, context=None):
if context is None:
context = {}
new_context = context.copy()
for key in context:
if key.startswith('default_'):
del new_context[key]
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
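    # For instance, a stored action context such as
    #   "{'search_default_user_id': uid, 'group_by': 'state'}"
    # comes back as {'group_by': 'state'}: the user-specific default filter is
    # dropped so shared users are not left with an empty view by default.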
def _shared_action_def(self, cr, uid, wizard_data, context=None):
copied_action = wizard_data.action_id
if wizard_data.access_mode == 'readonly':
view_mode = wizard_data.view_type
view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
else:
view_mode = copied_action.view_mode
view_id = copied_action.view_id.id
action_def = {
'name': wizard_data.name,
'domain': copied_action.domain,
'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
'res_model': copied_action.res_model,
'view_mode': view_mode,
'view_type': copied_action.view_type,
'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
'view_id': view_id,
'auto_search': True,
}
if copied_action.view_ids:
action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
'view_mode': x.view_mode,
'view_id': x.view_id.id })
for x in copied_action.view_ids
if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
]
return action_def
def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
"""Create a shortcut to reach the shared data, as well as the corresponding action, for
each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.
Meant to be overridden for special cases.
"""
values = self._shared_action_def(cr, uid, wizard_data, context=None)
user_obj = self.pool.get('res.users')
for user_id in user_ids:
action_id = self._create_action(cr, user_id, values)
if make_home:
# We do this only for new share users, as existing ones already have their initial home
# action. Resetting to the default menu does not work well as the menu is rather empty
# and does not contain the shortcuts in most cases.
user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
"""Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
model with ID ``model_id``.
:param model: browsable model to start loading relationships from
:param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
:param relation_fields: list of previously followed relationship tuples - to avoid duplicates
during recursion
:param suffix: optional suffix to append to the field path to reach the main object
"""
if relation_fields is None:
relation_fields = []
local_rel_fields = []
models = [x[1].model for x in relation_fields]
model_obj = self.pool.get('ir.model')
model_osv = self.pool[model.model]
for field in model_osv._fields.itervalues():
ftype = field.type
relation_field = None
if ftype in ttypes and field.comodel_name not in models:
relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',field.comodel_name)])[0]
relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
relation_osv = self.pool[field.comodel_name]
#skip virtual one2many fields (related, ...) as there is no reverse relationship
if ftype == 'one2many' and field.inverse_name:
# don't record reverse path if it's not a real m2o (that happens, but rarely)
dest_fields = relation_osv._fields
reverse_rel = field.inverse_name
if reverse_rel in dest_fields and dest_fields[reverse_rel].type == 'many2one':
relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
local_rel_fields.append((relation_field, relation_model_browse))
for parent in relation_osv._inherits:
if parent not in models:
parent_model = self.pool[parent]
parent_fields = parent_model._fields
parent_model_browse = model_obj.browse(cr, UID_ROOT,
model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
if relation_field and field.inverse_name in parent_fields:
# inverse relationship is available in the parent
local_rel_fields.append((relation_field, parent_model_browse))
else:
# TODO: can we setup a proper rule to restrict inherited models
# in case the parent does not contain the reverse m2o?
local_rel_fields.append((None, parent_model_browse))
if relation_model_id != model.id and ftype in ['one2many', 'many2many']:
local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
[ftype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
return local_rel_fields
def _get_relationship_classes(self, cr, uid, model, context=None):
"""Computes the *relationship classes* reachable from the given
model. The 4 relationship classes are:
- [obj0]: the given model itself (and its parents via _inherits, if any)
- [obj1]: obj0 and all other models recursively accessible from
obj0 via one2many relationships
- [obj2]: obj0 and all other models recursively accessible from
obj0 via one2many and many2many relationships
- [obj3]: all models recursively accessible from obj1 via many2one
relationships
Each class is returned as a list of pairs [(field,model_browse)], where
``model`` is the browse_record of a reachable ir.model, and ``field`` is
the dot-notation reverse relationship path coming from that model to obj0,
or None if there is no reverse path.
:return: ([obj0], [obj1], [obj2], [obj3])
"""
# obj0 class and its parents
obj0 = [(None, model)]
model_obj = self.pool[model.model]
ir_model_obj = self.pool.get('ir.model')
for parent in model_obj._inherits:
parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
obj0 += [(None, parent_model_browse)]
obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
for dummy, model in obj1:
obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
return obj0, obj1, obj2, obj3
def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
"""Adds access rights to group_id on object models referenced in ``fields_relations``,
           intersecting with the access rights of the current user to avoid granting too many rights
"""
model_access_obj = self.pool.get('ir.model.access')
user_obj = self.pool.get('res.users')
target_model_ids = [x[1].id for x in fields_relations]
perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
current_user = user_obj.browse(cr, uid, uid, context=context)
current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[x.id for x in current_user.groups_id], target_model_ids, context=context)
group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
[group_id], target_model_ids, context=context)
_logger.debug("Current user access matrix: %r", current_user_access_map)
_logger.debug("New group current access matrix: %r", group_access_map)
# Create required rights if allowed by current user rights and not
# already granted
for dummy, model in fields_relations:
            # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message', 'mail.notification', 'res.company']: continue
values = {
'name': _('Copied access for sharing'),
'group_id': group_id,
'model_id': model.id,
}
current_user_access_line = current_user_access_map.get(model.model,set())
existing_group_access_line = group_access_map.get(model.model,set())
need_creation = False
for perm in perms_to_add:
if perm in current_user_access_line \
and perm not in existing_group_access_line:
values.update({perm:True})
group_access_map.setdefault(model.model, set()).add(perm)
need_creation = True
if need_creation:
model_access_obj.create(cr, UID_ROOT, values)
_logger.debug("Creating access right for model %s with values: %r", model.model, values)
def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
rule_obj = self.pool.get('ir.rule')
rules_done = set()
for group in current_user.groups_id:
for dummy, model in fields_relations:
for rule in group.rule_groups:
if rule.id in rules_done:
continue
rules_done.add(rule.id)
if rule.model_id.id == model.id:
if 'user.' in rule.domain_force:
# Above pattern means there is likely a condition
# specific to current user, so we must copy the rule using
# the evaluated version of the domain.
                            # And it's better to copy one time too many than too few
rule_obj.copy(cr, UID_ROOT, rule.id, default={
'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain, # evaluated version!
})
_logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
else:
# otherwise we can simply link the rule to keep it dynamic
rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
'groups': [(4,group_id)]
})
_logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
"""Verifies that the given rule only belongs to the given group_id, otherwise
duplicate it for the current group, and unlink the previous one.
The duplicated rule has the original domain copied verbatim, without
any evaluation.
Returns the final rule to use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
"""Add a new ir.rule entry for model_id and domain on the target group_id.
If ``restrict`` is True, instead of adding a rule, the domain is
combined with AND operator with all existing rules in the group, to implement
an additional restriction (as of 6.1, multiple rules in the same group are
OR'ed by default, so a restriction must alter all existing rules)
This is necessary because the personal rules of the user that is sharing
are first copied to the new share group. Afterwards the filters used for
sharing are applied as an additional layer of rules, which are likely to
apply to the same model. The default rule algorithm would OR them (as of 6.1),
        which would result in a combined set of permissions that could be larger
than those of the user that is sharing! Hence we must forcefully AND the
rules at this stage.
One possibly undesirable effect can appear when sharing with a
pre-existing group, in which case altering pre-existing rules would not
be desired. This is addressed in the portal module.
"""
if rule_name is None:
rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
rule_obj = self.pool.get('ir.rule')
rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
if rule_ids:
for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
if rule.domain_force == domain:
# don't create it twice!
if restrict:
continue
else:
_logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
return
if restrict:
# restricting existing rules is done by adding the clause
# with an AND, but we can't alter the rule if it belongs to
# other groups, so we duplicate if needed
rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
eval_ctx = rule_obj._eval_context_for_combinations()
org_domain = expression.normalize_domain(safe_eval(rule.domain_force, eval_ctx))
new_clause = expression.normalize_domain(safe_eval(domain, eval_ctx))
combined_domain = expression.AND([new_clause, org_domain])
rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
_logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
if not rule_ids or not restrict:
# Adding the new rule in the group is ok for normal cases, because rules
# in the same group and for the same model will be combined with OR
# (as of v6.1), so the desired effect is achieved.
rule_obj.create(cr, UID_ROOT, {
'name': rule_name,
'model_id': model_id,
'domain_force': domain,
'groups': [(4,group_id)]
})
_logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
(current_user.name, current_user.login, group_id)
try:
domain = safe_eval(wizard_data.domain)
if domain:
for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not directly receive the access rights
if model.model in ['mail.message', 'mail.notification', 'res.company']: continue
related_domain = []
if not rel_field: continue
for element in domain:
if expression.is_leaf(element):
left, operator, right = element
left = '%s.%s'%(rel_field, left)
element = left, operator, right
related_domain.append(element)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=str(related_domain),
rule_name=rule_name, restrict=True, context=context)
except Exception:
_logger.exception('Failed to create share access')
raise osv.except_osv(_('Sharing access cannot be created.'),
_('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Share/User group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
"""Creates the appropriate share group and share users, and populates
result_line_ids of wizard_data with one line for each user.
:return: a tuple composed of the new group id (to which the shared access should be granted),
the ids of the new share users that have been created and the ids of the existing share users
"""
group_id = self._create_share_group(cr, uid, wizard_data, context=context)
# First create any missing user, based on the email addresses provided
new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
# Finally, setup the new action and shortcut for the users.
if existing_ids:
# existing users still need to join the new group
self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
'groups_id': [(4,group_id)],
})
            # existing users don't need their home action replaced, only a new shortcut
self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
if new_ids:
# new users need a new shortcut AND a home action
self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
return group_id, new_ids, existing_ids
def go_step_2(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr, uid, ids[0], context=context)
self._check_preconditions(cr, uid, wizard_data, context=context)
# Create shared group and users
group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
model_obj = self.pool.get('ir.model')
model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
model = model_obj.browse(cr, uid, model_id, context=context)
# ACCESS RIGHTS
# We have several classes of objects that should receive different access rights:
# Let:
# - [obj0] be the target model itself (and its parents via _inherits, if any)
# - [obj1] be the target model and all other models recursively accessible from
# obj0 via one2many relationships
# - [obj2] be the target model and all other models recursively accessible from
# obj0 via one2many and many2many relationships
# - [obj3] be all models recursively accessible from obj1 via many2one relationships
# (currently not used)
obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
mode = wizard_data.access_mode
# Add access to [obj0] and [obj1] according to chosen mode
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
# Add read-only access (always) to [obj2]
self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
# IR.RULES
# A. On [obj0], [obj1], [obj2]: add all rules from all groups of
# the user that is sharing
# Warning: rules must be copied instead of linked if they contain a reference
# to uid or if the rule is shared with other groups (and it must be replaced correctly)
# B. On [obj0]: 1 rule with domain of shared action
# C. For each model in [obj1]: 1 rule in the form:
# many2one_rel.domain_of_obj0
# where many2one_rel is the many2one used in the definition of the
# one2many, and domain_of_obj0 is the sharing domain
# For example if [obj0] is project.project with a domain of
# ['id', 'in', [1,2]]
# then we will have project.task in [obj1] and we need to create this
# ir.rule on project.task:
# ['project_id.id', 'in', [1,2]]
# A.
all_relations = obj0 + obj1 + obj2
self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
# B.
main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=main_domain,
restrict=True, context=context)
# C.
self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
# refresh wizard_data
wizard_data = self.browse(cr, uid, ids[0], context=context)
# EMAILS AND NOTIFICATIONS
# A. Not invite: as before
# -> send emails to destination users
# B. Invite (OpenSocial)
# -> subscribe all users (existing and new) to the record
# -> send a notification with a summary to the current record
        #  -> send a notification to all users; users who have enabled email
        #     notifications in their preferences will receive it
# new users by default receive all notifications by email
# A.
if not wizard_data.invite:
self.send_emails(cr, uid, wizard_data, context=context)
# B.
else:
# Invite (OpenSocial): automatically subscribe users to the record
res_id = 0
for cond in safe_eval(main_domain):
if cond[0] == 'id':
res_id = cond[2]
            # Record id not found: the sharing domain did not select a single record by id
if res_id <= 0:
raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
self.pool[model.model].message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
# self.send_invite_email(cr, uid, wizard_data, context=context)
# self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
# CLOSE
# A. Not invite: as before
# B. Invite: skip summary screen, get back to the record
# A.
if not wizard_data.invite:
dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
return {
'name': _('Shared access created!'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'share.wizard',
'view_id': False,
'res_id': ids[0],
'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'target': 'new'
}
# B.
else:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': model.model,
'view_id': False,
'res_id': res_id,
'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
subject = _('Invitation')
body = 'has been <b>shared</b> with'
        tmp_idx = 0
        for result_line in wizard_data.result_line_ids:
            body += ' @%s' % (result_line.user_id.login)
            if tmp_idx < len(wizard_data.result_line_ids)-2:
                body += ','
            elif tmp_idx == len(wizard_data.result_line_ids)-2:
                body += ' and'
            tmp_idx += 1
        body += '.'
return self.pool[model_name].message_post(cr, uid, [res_id], body=body, context=context)
def send_invite_email(self, cr, uid, wizard_data, context=None):
# TDE Note: not updated because will disappear
message_obj = self.pool.get('mail.message')
notification_obj = self.pool.get('mail.notification')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
body = _("Hello,\n\n")
body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
if wizard_data.message:
body += "%s\n\n" % (wizard_data.message)
if result_line.newly_created:
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
body += "%s: %s" % (_("Password"), result_line.password) + "\n"
body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
body += _("The documents have been automatically added to your subscriptions.\n\n")
body += '%s\n\n' % ((user.signature or ''))
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on https://www.odoo.com.")
msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
def send_emails(self, cr, uid, wizard_data, context=None):
_logger.info('Sending share notifications by email...')
mail_mail = self.pool.get('mail.mail')
user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
if not user.email:
raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
# TODO: also send an HTML version of this mail
mail_ids = []
for result_line in wizard_data.result_line_ids:
email_to = result_line.user_id.email
if not email_to:
continue
subject = wizard_data.name
body = _("Hello,\n\n")
body += _("I've shared %s with you!\n\n") % wizard_data.name
body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
if wizard_data.message:
body += '%s\n\n' % (wizard_data.message)
if result_line.newly_created:
body += _("These are your credentials to access this protected area:\n")
body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
body += "%s: %s\n" % (_("Password"), result_line.password)
body += "%s: %s\n" % (_("Database"), cr.dbname)
else:
body += _("The documents have been automatically added to your current Odoo documents.\n")
body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
body += "\n\n%s\n\n" % ( (user.signature or '') )
body += "--\n"
body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
"It is open source and can be found on https://www.odoo.com.")
mail_ids.append(mail_mail.create(cr, uid, {
'email_from': user.email,
'email_to': email_to,
'subject': subject,
'body_html': '<pre>%s</pre>' % body}, context=context))
# force direct delivery, as users expect instant notification
mail_mail.send(cr, uid, mail_ids, context=context)
_logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
wizard = self.browse(cr, uid, ids[0], context)
options = dict(title=opt_title, search=opt_search)
return {'value': {'embed_code': self._generate_embedded_code(wizard, options)}}
class share_result_line(osv.osv_memory):
_name = 'share.wizard.result.line'
_rec_name = 'user_id'
def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
data = dict(dbname=cr.dbname, login=this.login, password=this.password)
if this.share_wizard_id and this.share_wizard_id.action_id:
data['action'] = this.share_wizard_id.action_id.id
this = this.with_context(share_url_template_hash_arguments=['action'])
result[this.id] = this.share_wizard_id.share_url_template() % data
return result
_columns = {
'user_id': fields.many2one('res.users', required=True, readonly=True),
'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
'password': fields.char('Password', size=64, readonly=True),
'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
'newly_created': fields.boolean('Newly created', readonly=True),
}
_defaults = {
'newly_created': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
EnviroCentre/jython-upgrade
|
jython/lib/test/test_subprocess_jy.py
|
2
|
4882
|
"""Misc subprocess tests"""
import unittest
import os
import sys
import signal
import time
from test import test_support
from subprocess import PIPE, Popen, _cmdline2list
class TerminationAndSignalTest(unittest.TestCase):
def setUp(self):
program = '''
import signal, sys
def print_signal(signum, frame):
print signum
def exit_signal(signum, frame):
sys.exit(signum)
signal.signal(signal.SIGTERM, print_signal)
signal.signal(signal.SIGINT, exit_signal)
print 'Started'
sys.stdout.flush()
while True:
pass
'''
self.proc = Popen(['python', '-c', program], stdout=PIPE, stderr=PIPE)
assert self.proc.stdout.readline().strip() == 'Started'
def tearDown(self):
if self.proc.poll() is None:
self.proc.kill()
def test_kill(self):
self.proc.kill()
self.assertNotEqual(self.proc.wait(), 0)
if os._name != 'nt':
def test_terminate_can_be_ignored_on_posix(self):
self.proc.terminate()
self.assertIsNone(self.proc.poll())
def test_send_signals_on_posix(self):
self.proc.send_signal(signal.SIGTERM)
time.sleep(0.01) # Make sure SIGTERM is handled first
self.proc.send_signal(signal.SIGINT)
self.assertEqual(self.proc.wait(), 2)
self.assertEqual(self.proc.stdout.read(), '15\n')
else:
def test_terminate_cannot_be_ignored_on_windows(self):
self.proc.terminate()
self.assertNotEqual(self.proc.wait(), 0)
def test_sending_sigterm_signal_terminates_on_windows(self):
self.proc.send_signal(signal.SIGTERM)
self.assertNotEqual(self.proc.wait(), 0)
class PidTest(unittest.TestCase):
def testPid(self):
        # Cannot use sys.executable here because it's a script and has a different
        # pid than the actual started Java process.
p = Popen(['python', '-c', 'import os; print os.getpid()'],
stdout=PIPE)
p.wait()
self.assertEquals(int(p.stdout.read()), p.pid)
def testNonExistingField(self):
        # Test that we don't crash if the Process class doesn't have the field we need.
p = Popen(['echo foo'], shell=True, stdout=PIPE)
self.assertIsNone(p._get_pid('nonex'))
class EnvironmentInheritanceTest(unittest.TestCase):
def testDefaultEnvIsInherited(self):
# Test for issue #1104
os.environ['foo'] = 'something'
p1 = Popen([sys.executable, "-c",
'import os, sys; sys.stdout.write(os.environ["foo"])'],
stdout=PIPE)
self.assertEquals('something', p1.stdout.read())
class JythonOptsTest(unittest.TestCase):
""" Tests for (some parts of) issue #1187: JYTHON_OPTS should not be
enriched by arguments
"""
def testNoJythonOpts(self):
os.environ['JYTHON_OPTS'] = ''
p1 = Popen([sys.executable, "-c",
'import os, sys; sys.stdout.write(os.environ["JYTHON_OPTS"])'],
stdout=PIPE)
self.assertEquals('', p1.stdout.read())
def testExistingJythonOpts(self):
options = '-Qold -Qwarn'
os.environ['JYTHON_OPTS'] = options
p1 = Popen([sys.executable, "-c",
'import os, sys; sys.stdout.write(os.environ["JYTHON_OPTS"])'],
stdout=PIPE)
self.assertEquals(options, p1.stdout.read())
class Cmdline2ListTestCase(unittest.TestCase):
cmdlines = {
# From "Parsing C Command-Line Arguments"
# http://msdn.microsoft.com/en-us/library/a1y7w461(VS.80).aspx
'"a b c" d e': ['a b c', 'd', 'e'],
r'"ab\"c" "\\" d': ['ab"c', '\\', 'd'],
r'a\\\b d"e f"g h': [r'a\\\b', 'de fg', 'h'],
r'a\\\"b c d': [r'a\"b', 'c', 'd'],
r'a\\\\"b c" d e': [r'a\\b c', 'd', 'e'],
r'C:\\foo\bar\baz jy thon': [r'C:\\foo\bar\baz', 'jy', 'thon'],
r'C:\\Program Files\Foo\Bar qu \\ ux':
[r'C:\\Program', 'Files\Foo\Bar', 'qu', '\\\\', 'ux'],
r'"C:\\Program Files\Foo\Bar" qu \\ ux':
[r'C:\\Program Files\Foo\Bar', 'qu', '\\\\', 'ux'],
r'dir "C:\\Program Files\Foo\\" bar':
['dir', 'C:\\\\Program Files\\Foo\\', 'bar'],
r'echo "\"I hate Windows!\""': ['echo', '"I hate Windows!"'],
r'print "jython" "': ['print', 'jython', ''],
r'print \"jython\" \"': ['print', '"jython"', '"'],
r'print \"jython\" \\"': ['print', '"jython"', '\\']
}
def test_cmdline2list(self):
for cmdline, argv in self.cmdlines.iteritems():
self.assertEqual(_cmdline2list(cmdline), argv)
def test_main():
test_support.run_unittest(
TerminationAndSignalTest,
PidTest,
EnvironmentInheritanceTest,
JythonOptsTest,
Cmdline2ListTestCase)
if __name__ == '__main__':
test_main()
|
mit
|
open-synergy/server-tools
|
auth_brute_force/models/res_banned_remote.py
|
16
|
2544
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Tracks Authentication Attempts and Prevents Brute-force Attacks module
# Copyright (C) 2015-Today GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import urllib
import json
from openerp import models, fields, api
class ResBannedRemote(models.Model):
_name = 'res.banned.remote'
_rec_name = 'remote'
_GEOLOCALISATION_URL = "http://ip-api.com/json/{}"
# Default Section
def _default_ban_date(self):
return fields.Datetime.now()
# Column Section
description = fields.Text(
string='Description', compute='_compute_description', store=True)
ban_date = fields.Datetime(
string='Ban Date', required=True, default=_default_ban_date)
remote = fields.Char(string='Remote ID', required=True)
active = fields.Boolean(
string='Active', help="Uncheck this box to unban the remote",
default=True)
attempt_ids = fields.Many2many(
comodel_name='res.authentication.attempt', string='Attempts',
compute='_compute_attempt_ids')
# Compute Section
@api.multi
@api.depends('remote')
def _compute_description(self):
for item in self:
url = self._GEOLOCALISATION_URL.format(item.remote)
res = json.loads(urllib.urlopen(url).read())
item.description = ''
for k, v in res.iteritems():
item.description += '%s : %s\n' % (k, v)
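    # Note (assumption about the external service, not guaranteed here): ip-api.com
    # answers with a flat JSON object such as
    #   {"status": "success", "country": "France", "city": "Lyon", "isp": "Example ISP"}
    # and the loop above simply renders each key/value pair on its own line.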
@api.multi
def _compute_attempt_ids(self):
for item in self:
attempt_obj = self.env['res.authentication.attempt']
item.attempt_ids = attempt_obj.search_last_failed(item.remote).ids
|
agpl-3.0
|
farhaanbukhsh/sympy
|
sympy/core/tests/test_containers.py
|
49
|
5736
|
from sympy import Matrix, Tuple, symbols, sympify, Basic, Dict, S, FiniteSet, Integer
from sympy.core.containers import tuple_wrapper
from sympy.utilities.pytest import raises
from sympy.core.compatibility import is_sequence, iterable, u, range
def test_Tuple():
t = (1, 2, 3, 4)
st = Tuple(*t)
assert set(sympify(t)) == set(st)
assert len(t) == len(st)
assert set(sympify(t[:2])) == set(st[:2])
assert isinstance(st[:], Tuple)
assert st == Tuple(1, 2, 3, 4)
assert st.func(*st.args) == st
p, q, r, s = symbols('p q r s')
t2 = (p, q, r, s)
st2 = Tuple(*t2)
assert st2.atoms() == set(t2)
assert st == st2.subs({p: 1, q: 2, r: 3, s: 4})
# issue 5505
assert all(isinstance(arg, Basic) for arg in st.args)
assert Tuple(p, 1).subs(p, 0) == Tuple(0, 1)
assert Tuple(p, Tuple(p, 1)).subs(p, 0) == Tuple(0, Tuple(0, 1))
assert Tuple(t2) == Tuple(Tuple(*t2))
assert Tuple.fromiter(t2) == Tuple(*t2)
assert Tuple.fromiter(x for x in range(4)) == Tuple(0, 1, 2, 3)
assert st2.fromiter(st2.args) == st2
def test_Tuple_contains():
t1, t2 = Tuple(1), Tuple(2)
assert t1 in Tuple(1, 2, 3, t1, Tuple(t2))
assert t2 not in Tuple(1, 2, 3, t1, Tuple(t2))
def test_Tuple_concatenation():
assert Tuple(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
assert (1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
assert Tuple(1, 2) + (3, 4) == Tuple(1, 2, 3, 4)
raises(TypeError, lambda: Tuple(1, 2) + 3)
raises(TypeError, lambda: 1 + Tuple(2, 3))
#the Tuple case in __radd__ is only reached when a subclass is involved
class Tuple2(Tuple):
def __radd__(self, other):
return Tuple.__radd__(self, other + other)
assert Tuple(1, 2) + Tuple2(3, 4) == Tuple(1, 2, 1, 2, 3, 4)
assert Tuple2(1, 2) + Tuple(3, 4) == Tuple(1, 2, 3, 4)
def test_Tuple_equality():
assert Tuple(1, 2) is not (1, 2)
assert (Tuple(1, 2) == (1, 2)) is True
assert (Tuple(1, 2) != (1, 2)) is False
assert (Tuple(1, 2) == (1, 3)) is False
assert (Tuple(1, 2) != (1, 3)) is True
assert (Tuple(1, 2) == Tuple(1, 2)) is True
assert (Tuple(1, 2) != Tuple(1, 2)) is False
assert (Tuple(1, 2) == Tuple(1, 3)) is False
assert (Tuple(1, 2) != Tuple(1, 3)) is True
def test_Tuple_comparision():
assert (Tuple(1, 3) >= Tuple(-10, 30)) is S.true
assert (Tuple(1, 3) <= Tuple(-10, 30)) is S.false
assert (Tuple(1, 3) >= Tuple(1, 3)) is S.true
assert (Tuple(1, 3) <= Tuple(1, 3)) is S.true
def test_Tuple_tuple_count():
assert Tuple(0, 1, 2, 3).tuple_count(4) == 0
assert Tuple(0, 4, 1, 2, 3).tuple_count(4) == 1
assert Tuple(0, 4, 1, 4, 2, 3).tuple_count(4) == 2
assert Tuple(0, 4, 1, 4, 2, 4, 3).tuple_count(4) == 3
def test_Tuple_index():
assert Tuple(4, 0, 1, 2, 3).index(4) == 0
assert Tuple(0, 4, 1, 2, 3).index(4) == 1
assert Tuple(0, 1, 4, 2, 3).index(4) == 2
assert Tuple(0, 1, 2, 4, 3).index(4) == 3
assert Tuple(0, 1, 2, 3, 4).index(4) == 4
raises(ValueError, lambda: Tuple(0, 1, 2, 3).index(4))
raises(ValueError, lambda: Tuple(4, 0, 1, 2, 3).index(4, 1))
raises(ValueError, lambda: Tuple(0, 1, 2, 3, 4).index(4, 1, 4))
def test_Tuple_mul():
assert Tuple(1, 2, 3)*2 == Tuple(1, 2, 3, 1, 2, 3)
assert 2*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3)
assert Tuple(1, 2, 3)*Integer(2) == Tuple(1, 2, 3, 1, 2, 3)
assert Integer(2)*Tuple(1, 2, 3) == Tuple(1, 2, 3, 1, 2, 3)
raises(TypeError, lambda: Tuple(1, 2, 3)*S.Half)
raises(TypeError, lambda: S.Half*Tuple(1, 2, 3))
def test_tuple_wrapper():
@tuple_wrapper
def wrap_tuples_and_return(*t):
return t
p = symbols('p')
assert wrap_tuples_and_return(p, 1) == (p, 1)
assert wrap_tuples_and_return((p, 1)) == (Tuple(p, 1),)
assert wrap_tuples_and_return(1, (p, 2), 3) == (1, Tuple(p, 2), 3)
def test_iterable_is_sequence():
ordered = [list(), tuple(), Tuple(), Matrix([[]])]
unordered = [set()]
not_sympy_iterable = [{}, '', u('')]
assert all(is_sequence(i) for i in ordered)
assert all(not is_sequence(i) for i in unordered)
assert all(iterable(i) for i in ordered + unordered)
assert all(not iterable(i) for i in not_sympy_iterable)
assert all(iterable(i, exclude=None) for i in not_sympy_iterable)
def test_Dict():
x, y, z = symbols('x y z')
d = Dict({x: 1, y: 2, z: 3})
assert d[x] == 1
assert d[y] == 2
raises(KeyError, lambda: d[2])
assert len(d) == 3
assert set(d.keys()) == set((x, y, z))
assert set(d.values()) == set((S(1), S(2), S(3)))
assert d.get(5, 'default') == 'default'
assert x in d and z in d and not 5 in d
assert d.has(x) and d.has(1) # SymPy Basic .has method
# Test input types
# input - a python dict
# input - items as args - SymPy style
assert (Dict({x: 1, y: 2, z: 3}) ==
Dict((x, 1), (y, 2), (z, 3)))
raises(TypeError, lambda: Dict(((x, 1), (y, 2), (z, 3))))
with raises(NotImplementedError):
d[5] = 6 # assert immutability
assert set(
d.items()) == set((Tuple(x, S(1)), Tuple(y, S(2)), Tuple(z, S(3))))
assert set(d) == set([x, y, z])
assert str(d) == '{x: 1, y: 2, z: 3}'
assert d.__repr__() == '{x: 1, y: 2, z: 3}'
# Test creating a Dict from a Dict.
d = Dict({x: 1, y: 2, z: 3})
assert d == Dict(d)
def test_issue_5788():
args = [(1, 2), (2, 1)]
for o in [Dict, Tuple, FiniteSet]:
# __eq__ and arg handling
if o != Tuple:
assert o(*args) == o(*reversed(args))
pair = [o(*args), o(*reversed(args))]
assert sorted(pair) == sorted(reversed(pair))
assert set(o(*args)) # doesn't fail
|
bsd-3-clause
|
neuromancer/ocean
|
src/Run.py
|
1
|
2933
|
# -- coding: utf-8 --
#from ptrace.debugger.child import createChild
from os import system, dup2, close, open as fopen, O_RDONLY
from sys import stdin
from os import (
fork, execv, execve, getpid,
close, dup2, devnull, O_RDONLY)
from ptrace.binding import ptrace_traceme
from ptrace import PtraceError
from resource import getrlimit, setrlimit, RLIMIT_AS
fds = []
c = 0
class ChildError(RuntimeError):
pass
def _execChild(arguments, no_stdout, env):
if no_stdout:
try:
null = open(devnull, 'wb')
dup2(null.fileno(), 1)
dup2(1, 2)
null.close()
except IOError, err:
close(2)
close(1)
try:
if env is not None:
execve(arguments[0], arguments, env)
else:
execv(arguments[0], arguments)
except Exception, err:
raise ChildError(str(err))
def createChild(arguments, no_stdout, env=None):
"""
Create a child process:
     - arguments: list of strings (e.g. ['ls', '-la'])
- no_stdout: if True, use null device for stdout/stderr
- env: environment variables dictionary
Use:
- env={} to start with an empty environment
- env=None (default) to copy the environment
"""
# Fork process
pid = fork()
if pid:
return pid
else:
#print "limit",getrlimit(RLIMIT_DATA)
setrlimit(RLIMIT_AS, (1024*1024*1024, -1))
#print "limit",getrlimit(RLIMIT_DATA)
try:
ptrace_traceme()
except PtraceError, err:
raise ChildError(str(err))
_execChild(arguments, no_stdout, env)
exit(255)
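# Illustrative usage (sketch, command path assumed):
#   pid = createChild(['/bin/ls', '-la'], no_stdout=False)
# The child limits its address space, calls ptrace_traceme() and execs the command;
# the parent gets the child's pid back and can attach a ptrace debugger to it.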
def Launch(cmd, no_stdout, env):
global fds
global c
c = c + 1
#cmd = ["/usr/bin/timeout", "-k", "1", "3"]+cmd
#print cmd
if cmd[-1][0:2] == "< ":
filename = cmd[-1].replace("< ", "")
#try:
# close(3)
#except OSError:
# print "OsError!"
# pass
for fd in fds:
#print fd,
try:
close(fd)
#print "closed!"
except OSError:
#print "failed close!"
pass
fds = []
desc = fopen(filename,O_RDONLY)
fds.append(desc)
dup2(desc, stdin.fileno())
fds.append(desc)
#close(desc)
cmd = cmd[:-1]
#print "c:", c
#print "self pid", getpid()
r = createChild(cmd, no_stdout, env)
#print "new pid", r
#print "self pid", getpid()
#print "Done!"
return r
#class Runner:
# def __init__(self, cmd, timeout):
# #threading.Thread.__init__(self)
#
# self.cmd = cmd
# self.timeout = timeout
#
# def Run(self):
# #print self.cmd
# self.p = subprocess.call(self.cmd, shell=False)
# #self.p.wait()
# #self.join(self.timeout)
#
# #if self.is_alive():
# #print "terminate: ", self.p.pid
# #self.p.kill()
# #self.join()
# #return True
# return True
|
gpl-3.0
|
orekyuu/intellij-community
|
python/helpers/profiler/thrift/transport/TSSLSocket.py
|
121
|
7717
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
    @param ca_certs: Filename of the Certificate Authority pem file, possibly a
    file downloaded from http://curl.haxx.se/ca/cacert.pem. This is passed to
    ssl.wrap_socket() as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION)
except ssl.SSLError, ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
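# Illustrative client usage (sketch; host name and certificate path are assumptions):
#   transport = TSSLSocket(host='thrift.example.com', port=9090,
#                          ca_certs='/etc/ssl/certs/ca-bundle.pem')
#   transport.open()  # wraps the socket with ssl.wrap_socket() and validates the peer cert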
|
apache-2.0
|
ericls/niji
|
niji/urls.py
|
1
|
1889
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt
from rest_framework import routers
from . import api
from . import views
api_router = routers.DefaultRouter()
api_router.register(r'topics', api.TopicApiView)
api_router.register(r'post', api.PostApiView)
urlpatterns = [
url(r'^page/(?P<page>[0-9]+)/$', views.Index.as_view(), name='index'),
url(r'^$', views.Index.as_view(), name='index'),
url(r'^n/(?P<pk>\d+)/page/(?P<page>[0-9]+)/$', views.NodeView.as_view(), name='node'),
url(r'^n/(?P<pk>\d+)/$', views.NodeView.as_view(), name='node'),
url(r'^t/(?P<pk>\d+)/edit/$', views.edit_topic, name='edit_topic'),
url(r'^t/(?P<pk>\d+)/append/$', views.create_appendix, name='create_appendix'),
url(r'^t/(?P<pk>\d+)/page/(?P<page>[0-9]+)/$', views.TopicView.as_view(), name='topic'),
url(r'^t/(?P<pk>\d+)/$', views.TopicView.as_view(), name='topic'),
url(r'^u/(?P<pk>\d+)/$', views.user_info, name='user_info'),
url(r'^u/(?P<pk>\d+)/topics/page/(?P<page>[0-9]+)/$', views.UserTopics.as_view(), name='user_topics'),
url(r'^u/(?P<pk>\d+)/topics/$', views.UserTopics.as_view(), name='user_topics'),
url(r'^login/$', views.login_view, name='login'),
url(r'^reg/$', views.reg_view, name='reg'),
url(r'^logout/$', views.logout_view, name="logout"),
url(r'^search/$', views.search_redirect, name='search_redirect'),
url(r'^search/(?P<keyword>.*?)/page/(?P<page>[0-9]+)/$', views.SearchView.as_view(), name='search'),
url(r'^search/(?P<keyword>.*?)/$', views.SearchView.as_view(), name='search'),
url(r'^t/create/$', views.create_topic, name='create_topic'),
url(r'^notifications/$', views.NotificationView.as_view(), name='notifications'),
url(r'^avatar/$', views.upload_avatar, name="upload_avatar"),
url(r'^api/', include(api_router.urls)),
]
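# Illustrative usage (sketch; assumes this URLconf is included under the 'niji' namespace):
#   from django.core.urlresolvers import reverse
#   reverse('niji:topic', kwargs={'pk': 1})                      # -> '.../t/1/'
#   reverse('niji:search', kwargs={'keyword': 'x', 'page': 2})   # -> '.../search/x/page/2/'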
|
mit
|
adityacs/ansible
|
lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py
|
8
|
5589
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Status of sslkeyandcertificate.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of sslkeyandcertificate.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
'''
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
ca_certs=dict(type='list',),
certificate=dict(type='dict', required=True),
certificate_management_profile_ref=dict(type='str',),
created_by=dict(type='str',),
dynamic_params=dict(type='list',),
enckey_base64=dict(type='str',),
enckey_name=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
key=dict(type='str',),
key_params=dict(type='dict',),
name=dict(type='str', required=True),
status=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslkeyandcertificate',
set(['key']))
if __name__ == '__main__':
main()
|
gpl-3.0
|
spektom/incubator-airflow
|
tests/providers/amazon/aws/operators/test_sqs.py
|
4
|
2353
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import MagicMock
from moto import mock_sqs
from airflow import DAG
from airflow.providers.amazon.aws.hooks.sqs import SQSHook
from airflow.providers.amazon.aws.operators.sqs import SQSPublishOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2019, 1, 1)
class TestSQSPublishOperator(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
self.operator = SQSPublishOperator(
task_id='test_task',
dag=self.dag,
sqs_queue='test',
message_content='hello',
aws_conn_id='aws_default'
)
self.mock_context = MagicMock()
self.sqs_hook = SQSHook()
@mock_sqs
def test_execute_success(self):
self.sqs_hook.create_queue('test')
result = self.operator.execute(self.mock_context)
self.assertTrue('MD5OfMessageBody' in result)
self.assertTrue('MessageId' in result)
message = self.sqs_hook.get_conn().receive_message(QueueUrl='test')
self.assertEqual(len(message['Messages']), 1)
self.assertEqual(message['Messages'][0]['MessageId'], result['MessageId'])
self.assertEqual(message['Messages'][0]['Body'], 'hello')
context_calls = []
self.assertTrue(self.mock_context['ti'].method_calls == context_calls, "context call should be same")
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
anaselli/dnfdragora
|
dnfdragora/compsicons.py
|
1
|
3731
|
import os.path
class CompsIcons:
'''
    This class manages access to group names and icons
'''
def __init__(self, rpm_groups, icon_path=None):
if icon_path:
self.icon_path = icon_path if icon_path.endswith("/") else icon_path + "/"
else:
self.icon_path = "/usr/share/pixmaps/comps/"
self.default_icon = self.icon_path + "uncategorized.png"
# workaround for https://github.com/timlau/dnf-daemon/issues/9
# generated using tools/gen-comps-category-list.sh
self._group_info = {
"KDE Desktop": {"title": _("KDE Desktop"), "icon" :"kde-desktop-environment.png"},
"Xfce Desktop": {"title": _("Xfce Desktop"), "icon" :"xfce-desktop-environment.png"},
"Applications": {"title": _("Applications"), "icon" :"apps.png"},
"LXDE Desktop": {"title": _("LXDE Desktop"), "icon" :"lxde-desktop-environment.png"},
"LXQt Desktop": {"title": _("LXQt Desktop"), "icon" :"lxqt-desktop-environment.png"},
"Cinnamon Desktop": {"title": _("Cinnamon Desktop"), "icon" :"cinnamon-desktop-environment.png"},
"MATE Desktop": {"title": _("MATE Desktop"), "icon" :"mate-desktop-environment.png"},
"Hawaii Desktop": {"title": _("Hawaii Desktop"), "icon" :"hawaii-desktop-environment.png"},
"Sugar Desktop Environment": {"title": _("Sugar Desktop Environment"), "icon" :"sugar-desktop-environment.png"},
"GNOME Desktop": {"title": _("GNOME Desktop"), "icon" :"gnome-desktop-environment.png"},
"Development": {"title": _("Development"), "icon" :"development.png"},
"Servers": {"title": _("Servers"), "icon" :"servers.png"},
"Base System": {"title": _("Base System"), "icon" :"base-system.png"},
"Content": {"title": _("Content"), "icon" :"content.png"},
}
self._getID_to_map(rpm_groups, self._group_info)
# adding special groups
        if 'All' not in self._group_info.keys():
            self._group_info['All'] = {"title" : _("All")}
        if 'Empty' not in self._group_info.keys():
            self._group_info['Empty'] = {"title" : _("Empty")}
        if 'Search' not in self._group_info.keys():
            self._group_info['Search'] = {"title" : _("Search result")}
def _getID_to_map(self, groups, group_info, g_id=None) :
'''
        Populate group_info in place with the id-to-title/icon mapping at run time
'''
gid = g_id
for gl in groups:
if (isinstance(gl, list)):
if (type(gl[0]) is str) :
if not gid:
if not gl[0] in group_info.keys():
group_info[gl[0]] = { "title": gl[1], 'icon': gl[0] + ".png"}
gid = gl[0]
else:
if not gl[0] in group_info[gid].keys():
group_info[gid][gl[0]] = { "title": gl[1], 'icon': gl[0] + ".png"}
else :
self._getID_to_map(gl, group_info, gid)
@property
def groups(self):
'''
return all the group info
'''
return self._group_info
def icon(self, group_path):
group_names = group_path.split("/")
for group_name in reversed(group_names):
icon_name = group_name + ".png"
if group_name in self._group_info.keys():
if ('icon' in self._group_info[group_name].keys()):
icon_name = self._group_info[group_name]['icon']
icon_pathname = self.icon_path + icon_name
if os.path.exists(icon_pathname):
return icon_pathname
return self.default_icon
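# Illustrative usage (sketch; group names and icon file are assumed):
#   ci = CompsIcons(rpm_groups)
#   ci.icon("Applications/Editors")
# walks the path right-to-left and returns something like
# "/usr/share/pixmaps/comps/editors.png" if that file exists,
# otherwise the uncategorized.png fallback.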
|
gpl-3.0
|
ubc/edx-ora2
|
openassessment/assessment/models/ai.py
|
5
|
30773
|
"""
Database models for AI assessment.
"""
from uuid import uuid4
import json
import logging
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.cache import cache, get_cache
from django.db import models, transaction, DatabaseError
from django.utils.timezone import now
from django_extensions.db.fields import UUIDField
from dogapi import dog_stats_api
from submissions import api as sub_api
from .base import Rubric, Criterion, Assessment, AssessmentPart
from .training import TrainingExample
AI_ASSESSMENT_TYPE = "AI"
logger = logging.getLogger(__name__)
# Use an in-memory cache to hold classifier data, but allow settings to override this.
# The classifier data will generally be larger than memcached's default max size
CLASSIFIERS_CACHE_IN_MEM = getattr(
settings, 'ORA2_CLASSIFIERS_CACHE_IN_MEM',
get_cache(
'django.core.cache.backends.locmem.LocMemCache',
LOCATION='openassessment.ai.classifiers_dict'
)
)
CLASSIFIERS_CACHE_IN_FILE = getattr(
settings, 'ORA2_CLASSIFIERS_CACHE_IN_FILE',
get_cache(
'django.core.cache.backends.filebased.FileBasedCache',
LOCATION='/tmp/ora2_classifier_cache'
)
)
def essay_text_from_submission(submission):
"""
Retrieve the submission text.
Submissions are arbitrary JSON-blobs, which *should*
contain a single key, "answer", containing the essay
submission text.
If not, though, assume we've been given the essay text
directly (convenient for testing).
"""
if isinstance(submission, dict):
if 'answer' in submission:
# Format used for answer in examples.
if isinstance(submission['answer'], unicode):
return submission['answer']
# Initially there was one prompt and submission had the structure
# {'answer': {'text': 'The text.'}}
elif 'text' in submission['answer']:
essay_text = submission['answer']['text']
            # When multiple prompts were introduced, the structure of the submission became:
            # {'answer': {'parts': [{'text': 'The text part 1.'}, {'text': 'The text part 2.'}]}}
            # We concatenate these parts and let the AI grader evaluate the combined text.
else:
essay_text = u'\n'.join([part['text'] for part in submission['answer']['parts']])
else:
essay_text = unicode(submission)
return essay_text
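# Illustrative sketch (not part of the original module) of the submission shapes the
# helper above accepts; the literal values are placeholders:
#
#   essay_text_from_submission({'answer': u'plain essay text'})
#       # -> u'plain essay text' (format used for training examples)
#   essay_text_from_submission({'answer': {'text': u'single-prompt essay'}})
#       # -> u'single-prompt essay'
#   essay_text_from_submission({'answer': {'parts': [{'text': u'part 1'}, {'text': u'part 2'}]}})
#       # -> u'part 1\npart 2' (multi-prompt parts joined with newlines)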
class IncompleteClassifierSet(Exception):
"""
The classifier set is missing a classifier for a criterion in the rubric.
"""
def __init__(self, missing_criteria):
"""
Construct an error message that explains which criteria were missing.
Args:
missing_criteria (list): The list of criteria names that were missing.
"""
msg = (
u"Missing classifiers for the following "
u"criteria: {missing}"
).format(missing=missing_criteria)
super(IncompleteClassifierSet, self).__init__(msg)
class ClassifierUploadError(Exception):
"""
An error occurred while uploading classifier data.
"""
pass
class ClassifierSerializeError(Exception):
"""
An error occurred while serializing classifier data.
"""
pass
class NoTrainingExamples(Exception):
"""
No training examples were provided to the workflow.
"""
def __init__(self, workflow_uuid=None):
msg = u"No training examples were provided"
if workflow_uuid is not None:
msg = u"{msg} to the training workflow with UUID {uuid}".format(
msg=msg, uuid=workflow_uuid
)
super(NoTrainingExamples, self).__init__(msg)
class AIClassifierSet(models.Model):
"""
A set of trained classifiers (immutable).
"""
class Meta:
app_label = "assessment"
ordering = ['-created_at', '-id']
# The rubric associated with this set of classifiers
# We should have one classifier for each of the criteria in the rubric.
rubric = models.ForeignKey(Rubric, related_name="+")
# Timestamp for when the classifier set was created.
# This allows us to find the most recently trained set of classifiers.
created_at = models.DateTimeField(default=now, db_index=True)
# The ID of the algorithm that was used to train classifiers in this set.
algorithm_id = models.CharField(max_length=128, db_index=True)
# Course Entity and Item Discriminator
# Though these items are duplicated in the database tables for the AITrainingWorkflow,
# this is okay because it will drastically speed up the operation of assigning classifiers
# to AIGradingWorkflows
course_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
@classmethod
@transaction.commit_on_success
def create_classifier_set(cls, classifiers_dict, rubric, algorithm_id, course_id, item_id):
"""
Create a set of classifiers.
Args:
classifiers_dict (dict): Mapping of criterion names to
JSON-serializable classifiers.
rubric (Rubric): The rubric model.
algorithm_id (unicode): The ID of the algorithm used to train the classifiers.
course_id (unicode): The ID of the course that the classifier is going to be grading
item_id (unicode): The item within the course that the classifier is trained to grade.
Returns:
AIClassifierSet
Raises:
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
# Create the classifier set
classifier_set = cls.objects.create(
rubric=rubric, algorithm_id=algorithm_id, item_id=item_id, course_id=course_id
)
# Retrieve the criteria for this rubric,
# then organize them by criterion name
try:
rubric_index = rubric.index
except DatabaseError as ex:
msg = (
u"An unexpected error occurred while retrieving rubric criteria with the"
u"rubric hash {rh} and algorithm_id {aid}: {ex}"
).format(rh=rubric.content_hash, aid=algorithm_id, ex=ex)
logger.exception(msg)
raise
# Check that we have classifiers for all criteria in the rubric
# Ignore criteria that have no options: since these have only written feedback,
# we can't assign them a score.
all_criteria = set(classifiers_dict.keys())
all_criteria |= set(
criterion.name for criterion in
rubric_index.find_criteria_without_options()
)
missing_criteria = rubric_index.find_missing_criteria(all_criteria)
if missing_criteria:
raise IncompleteClassifierSet(missing_criteria)
# Create classifiers for each criterion
for criterion_name, classifier_data in classifiers_dict.iteritems():
classifier = AIClassifier.objects.create(
classifier_set=classifier_set,
criterion=rubric_index.find_criterion(criterion_name)
)
# Serialize the classifier data and upload
try:
contents = ContentFile(json.dumps(classifier_data))
except (TypeError, ValueError, UnicodeDecodeError) as ex:
msg = (
u"Could not serialize classifier data as JSON: {ex}"
).format(ex=ex)
raise ClassifierSerializeError(msg)
filename = uuid4().hex
try:
classifier.classifier_data.save(filename, contents)
except Exception as ex:
full_filename = upload_to_path(classifier, filename)
msg = (
u"Could not upload classifier data to {filename}: {ex}"
).format(filename=full_filename, ex=ex)
raise ClassifierUploadError(msg)
return classifier_set
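    # Illustrative shape (not part of the original model) of the classifiers_dict
    # argument expected above: one JSON-serializable blob per rubric criterion that
    # has options, keyed by criterion name, e.g.
    #     {u'Ideas': {...trained classifier data...}, u'Content': {...}}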
@classmethod
def most_recent_classifier_set(cls, rubric, algorithm_id, course_id, item_id):
"""
Finds the most relevant classifier set based on the following line of succession:
1 -- Classifier sets with the same COURSE, ITEM, RUBRIC *content* hash, and ALGORITHM
- Newest first. If none exist...
2 -- Classifier sets with the same COURSE, ITEM, and RUBRIC *structure* hash, and ALGORITHM.
- Newest first. If none exist...
3 -- The newest classifier set with the same RUBRIC and ALGORITHM
- Newest first. If none exist...
        4 -- Do no assignment and return None
Case #1 is ideal: we get a classifier set trained for the rubric as currently defined.
Case #2 handles when a course author makes a cosmetic change to a rubric after training.
We don't want to stop grading students because an author fixed a typo!
Case #3 handles problems that are duplicated, such as the default problem prompt.
If we've already trained classifiers for the identical rubric somewhere else,
then the author can use them to test out the feature immediately.
Case #4: Someone will need to schedule training; however, we will still accept
student submissions and grade them once training completes.
Args:
rubric (Rubric): The rubric associated with the classifier set.
algorithm_id (unicode): The algorithm used to create the classifier set.
course_id (unicode): The course identifier for the current problem.
item_id (unicode): The item identifier for the current problem.
Returns:
ClassifierSet or None
Raises:
DatabaseError
"""
# List of the parameters we will search for, in order of decreasing priority
search_parameters = [
# Case #1: same course / item / rubric (exact) / algorithm
{
'rubric__content_hash': rubric.content_hash,
'algorithm_id': algorithm_id,
'course_id': course_id,
'item_id': item_id
},
# Case #2: same course / item / rubric (structure only) / algorithm
{
'rubric__structure_hash': rubric.structure_hash, # pylint: disable=E1101
'algorithm_id': algorithm_id,
'course_id': course_id,
'item_id': item_id
},
# Case #3: same rubric (exact) / algorithm
{
'rubric__content_hash': rubric.content_hash,
'algorithm_id': algorithm_id
}
]
# Perform each query, starting with the highest priority
for params in search_parameters:
# Retrieve the most recent classifier set that matches our query
# (rely on implicit ordering in the model definition)
classifier_set_candidates = cls.objects.filter(**params)[:1]
            # If the query matched any classifier sets,
            # return the most recent one (the Meta ordering puts newest first)
if len(classifier_set_candidates) > 0:
return classifier_set_candidates[0]
# If we get to this point, no classifiers exist with this rubric and algorithm.
return None
@property
def classifier_data_by_criterion(self):
"""
Return info for all classifiers in this classifier set in a dictionary
that maps criteria names to classifier data.
Returns:
dict: keys are criteria names, values are JSON-serializable classifier data
Raises:
ValueError
IOError
httplib.HTTPException
"""
# First check the in-memory cache
# We use an in-memory cache because the classifier data will most often
# be several megabytes, which exceeds the default memcached size limit.
# If we find it, we can avoid calls to the database, S3, and json.
cache_key = self._cache_key("classifier_data_by_criterion")
classifiers_dict = CLASSIFIERS_CACHE_IN_MEM.get(cache_key)
# If we can't find the classifier in-memory, check the filesystem cache
# We can't always rely on the in-memory cache because worker processes
# terminate when max retries are exceeded.
if classifiers_dict is None:
msg = (
u"Could not find classifiers dict in the in-memory "
u"cache for key {key}. Falling back to the file-based cache."
).format(key=cache_key)
logger.info(msg)
classifiers_dict = CLASSIFIERS_CACHE_IN_FILE.get(cache_key)
else:
msg = (
u"Found classifiers dict in the in-memory cache "
u"(cache key was {key})"
).format(key=cache_key)
logger.info(msg)
# If we can't find the classifiers dict in the cache,
# we need to look up the classifiers in the database,
# then download the classifier data.
if classifiers_dict is None:
classifiers_dict = {
classifier.criterion.name: classifier.download_classifier_data()
for classifier in self.classifiers.select_related().all() # pylint: disable=E1101
}
CLASSIFIERS_CACHE_IN_MEM.set(cache_key, classifiers_dict)
CLASSIFIERS_CACHE_IN_FILE.set(cache_key, classifiers_dict)
msg = (
u"Could not find classifiers dict in either the in-memory "
u"or file-based cache. Downloaded the data from S3 and cached "
u"it using key {key}"
).format(key=cache_key)
logger.info(msg)
return classifiers_dict
@property
def valid_scores_by_criterion(self):
"""
Return the valid scores for each classifier in this classifier set.
Returns:
dict: maps rubric criterion names to lists of valid scores.
"""
cache_key = self._cache_key("valid_scores_by_criterion")
valid_scores_by_criterion = cache.get(cache_key)
if valid_scores_by_criterion is None:
valid_scores_by_criterion = {
classifier.criterion.name: classifier.valid_scores
for classifier in self.classifiers.select_related().all() # pylint: disable=E1101
}
cache.set(cache_key, valid_scores_by_criterion)
return valid_scores_by_criterion
def _cache_key(self, data_name):
"""
Return a cache key for this classifier set.
Args:
data_name (unicode): Name for the data associated with this key.
Returns:
unicode
"""
return u"openassessment.assessment.ai.classifier_set.{pk}.{data_name}".format(
pk=self.pk, data_name=data_name
)
# Directory in which classifiers will be stored
# For instance, if we're using the default file system storage backend
# for local development, this will be a subdirectory.
# If using an S3 storage backend, this will be a subdirectory in
# an AWS S3 bucket.
AI_CLASSIFIER_STORAGE = "ora2_ai_classifiers"
def upload_to_path(instance, filename): # pylint:disable=W0613
"""
Calculate the file path where classifiers should be uploaded.
Optionally prepends the path with a prefix (determined by Django settings).
This allows us to put classifiers from different environments
(stage / prod) in different directories within the same S3 bucket.
Args:
instance (AIClassifier): Not used.
filename (unicode): The filename provided when saving the file.
Returns:
unicode
"""
prefix = getattr(settings, 'ORA2_FILE_PREFIX', None)
if prefix is not None:
return u"{prefix}/{root}/{filename}".format(
prefix=prefix,
root=AI_CLASSIFIER_STORAGE,
filename=filename
)
else:
return u"{root}/{filename}".format(
root=AI_CLASSIFIER_STORAGE,
filename=filename
)
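# Illustrative examples (not part of the original module) of the paths produced by
# upload_to_path(); <hex> stands in for the uuid4 hex filename used when saving:
#   with settings.ORA2_FILE_PREFIX = 'prod'  -> u'prod/ora2_ai_classifiers/<hex>'
#   with no ORA2_FILE_PREFIX setting         -> u'ora2_ai_classifiers/<hex>'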
class AIClassifier(models.Model):
"""
A trained classifier (immutable).
"""
class Meta:
app_label = "assessment"
# The set of classifiers this classifier belongs to
classifier_set = models.ForeignKey(AIClassifierSet, related_name="classifiers")
# The criterion (in the rubric) that this classifier evaluates.
criterion = models.ForeignKey(Criterion, related_name="+")
# The serialized classifier
# Because this may be large, we store it using a Django `FileField`,
# which allows us to plug in different storage backends (such as S3)
classifier_data = models.FileField(upload_to=upload_to_path)
def download_classifier_data(self):
"""
Download and deserialize the classifier data.
Returns:
JSON-serializable
Raises:
ValueError
IOError
httplib.HTTPException
"""
return json.loads(self.classifier_data.read()) # pylint:disable=E1101
@property
def valid_scores(self):
"""
Return a list of valid scores for the rubric criterion associated
with this classifier.
Returns:
list of integer scores, in ascending order.
"""
return sorted([option.points for option in self.criterion.options.all()])
class AIWorkflow(models.Model):
"""
Abstract base class for AI workflow database models.
"""
class Meta:
app_label = "assessment"
abstract = True
# Unique identifier used to track this workflow
uuid = UUIDField(version=1, db_index=True, unique=True)
# Course Entity and Item Discriminator
# Though these items are duplicated in the database tables for the submissions app,
# and every workflow has a reference to a submission entry, this is okay because
# submissions are immutable.
course_id = models.CharField(max_length=40, db_index=True)
item_id = models.CharField(max_length=128, db_index=True)
# Timestamps
# The task is *scheduled* as soon as a client asks the API to
# train classifiers.
# The task is *completed* when a worker has successfully created a
# classifier set based on the training examples.
scheduled_at = models.DateTimeField(default=now, db_index=True)
completed_at = models.DateTimeField(null=True, db_index=True)
# The ID of the algorithm used to train the classifiers
# This is a parameter passed to and interpreted by the workers.
# Django settings allow the users to map algorithm ID strings
# to the Python code they should use to perform the training.
algorithm_id = models.CharField(max_length=128, db_index=True)
# The set of trained classifiers.
# In the training task, this field will be set when the task completes successfully.
# In the grading task, this may be set to null if no classifiers are available
# when the student submits an essay for grading.
classifier_set = models.ForeignKey(
AIClassifierSet, related_name='+',
null=True, default=None
)
@property
def is_complete(self):
"""
Check whether the workflow is complete.
Returns:
bool
"""
return self.completed_at is not None
def mark_complete_and_save(self):
"""
Mark the workflow as complete.
Returns:
None
"""
self.completed_at = now()
self.save()
self._log_complete_workflow()
@classmethod
def get_incomplete_workflows(cls, course_id, item_id):
"""
Gets all incomplete grading workflows for a given course and item.
Args:
course_id (unicode): Uniquely identifies the course
item_id (unicode): The discriminator for the item we are looking for
Yields:
All incomplete workflows for this item, as a delayed "stream"
Raises:
DatabaseError
cls.DoesNotExist
"""
        # Find all of the UUIDs for the workflows matched by the query
grade_workflow_uuids = [
wflow['uuid'] for wflow in cls.objects.filter(
course_id=course_id, item_id=item_id, completed_at__isnull=True
).values('uuid')
]
# Continues to generate output until all workflows in the queryset have been output
for workflow_uuid in grade_workflow_uuids:
# Returns the grading workflow associated with the uuid stored in the initial query
workflow = cls.objects.get(uuid=workflow_uuid)
yield workflow
@classmethod
def is_workflow_complete(cls, workflow_uuid):
"""
Check whether the workflow with a given UUID has been marked complete.
Args:
workflow_uuid (str): The UUID of the workflow to check.
Returns:
bool
Raises:
DatabaseError
cls.DoesNotExist
"""
workflow = cls.objects.get(uuid=workflow_uuid)
return workflow.is_complete
def _log_start_workflow(self):
"""
        A logging operation called at the beginning of an AI Workflow's life.
Increments the number of tasks of that kind.
"""
        # Identifies the type of task for reporting
class_name = self.__class__.__name__
data_path = 'openassessment.assessment.ai_task.' + class_name
# Sets identity tags which allow sorting by course and item
tags = [
u"course_id:{course_id}".format(course_id=self.course_id),
u"item_id:{item_id}".format(item_id=self.item_id),
]
logger.info(u"{class_name} with uuid {uuid} was started.".format(class_name=class_name, uuid=self.uuid))
dog_stats_api.increment(data_path + '.scheduled_count', tags=tags)
def _log_complete_workflow(self):
"""
        A logging operation called at the end of an AI Workflow's life.
Reports the total time the task took.
"""
        # Identifies the type of task for reporting
class_name = self.__class__.__name__
data_path = 'openassessment.assessment.ai_task.' + class_name
tags = [
u"course_id:{course_id}".format(course_id=self.course_id),
u"item_id:{item_id}".format(item_id=self.item_id),
]
# Calculates the time taken to complete the task and reports it to datadog
time_delta = self.completed_at - self.scheduled_at
dog_stats_api.histogram(
data_path + '.turnaround_time',
time_delta.total_seconds(),
tags=tags
)
dog_stats_api.increment(data_path + '.completed_count', tags=tags)
logger.info(
(
u"{class_name} with uuid {uuid} completed its workflow successfully "
u"in {seconds} seconds."
).format(class_name=class_name, uuid=self.uuid, seconds=time_delta.total_seconds())
)
class AITrainingWorkflow(AIWorkflow):
"""
Used to track AI training tasks.
Training tasks take as input an algorithm ID and a set of training examples
(which are associated with a rubric).
On successful completion, training tasks output a set of trained classifiers.
"""
class Meta:
app_label = "assessment"
# The training examples (essays + scores) used to train the classifiers.
# This is a many-to-many field because
# (a) we need multiple training examples to train a classifier, and
# (b) we may want to re-use training examples
# (for example, if a training task is executed by Celery workers multiple times)
training_examples = models.ManyToManyField(TrainingExample, related_name="+")
@classmethod
@transaction.commit_on_success
def start_workflow(cls, examples, course_id, item_id, algorithm_id):
"""
Start a workflow to track a training task.
Args:
examples (list of TrainingExample): The training examples used to create the classifiers.
course_id (unicode): The ID for the course that the training workflow is associated with.
item_id (unicode): The ID for the item that the training workflow is training to assess.
algorithm_id (unicode): The ID of the algorithm to use for training.
Returns:
AITrainingWorkflow
Raises:
NoTrainingExamples
"""
if len(examples) == 0:
raise NoTrainingExamples()
workflow = AITrainingWorkflow.objects.create(algorithm_id=algorithm_id, item_id=item_id, course_id=course_id)
workflow.training_examples.add(*examples)
workflow.save()
workflow._log_start_workflow()
return workflow
@property
def rubric(self):
"""
        Return the rubric associated with this workflow's training examples.
Returns:
Rubric or None (if no training examples are available)
Raises:
NoTrainingExamples
"""
# We assume that all the training examples we have been provided are using
# the same rubric (this is enforced by the API call that deserializes
# the training examples).
first_example = list(self.training_examples.all()[:1]) # pylint: disable=E1101
if first_example:
return first_example[0].rubric
else:
raise NoTrainingExamples(workflow_uuid=self.uuid)
def complete(self, classifier_set):
"""
Add a classifier set to the workflow and mark it complete.
Args:
classifier_set (dict): Mapping of criteria names to serialized classifiers.
Returns:
None
Raises:
NoTrainingExamples
IncompleteClassifierSet
ClassifierSerializeError
ClassifierUploadError
InvalidRubricSelection
DatabaseError
"""
self.classifier_set = AIClassifierSet.create_classifier_set(
classifier_set, self.rubric, self.algorithm_id, self.course_id, self.item_id
)
self.mark_complete_and_save()
class AIGradingWorkflow(AIWorkflow):
"""
Used to track AI grading tasks.
Grading tasks take as input an essay submission
and a set of classifiers; the tasks select options
for each criterion in the rubric.
"""
class Meta:
app_label = "assessment"
# The UUID of the submission being graded
submission_uuid = models.CharField(max_length=128, db_index=True)
# The text of the essay submission to grade
# We duplicate this here to avoid having to repeatedly look up
# the submission. Since submissions are immutable, this is safe.
essay_text = models.TextField(blank=True)
# The rubric used to evaluate the submission.
# We store this so we can look for classifiers for the same rubric
# if none are available when the workflow is created.
rubric = models.ForeignKey(Rubric, related_name="+")
# The assessment produced by the AI grading algorithm
# Until the task completes successfully, this will be set to null
assessment = models.ForeignKey(
Assessment, related_name="+", null=True, default=None
)
# Identifier information associated with the student's submission
# Useful for finding workflows for a particular course/item/student
# Since submissions are immutable, and since the workflow is
# associated with one submission, it's safe to duplicate
# this information here from the submissions models.
student_id = models.CharField(max_length=40, db_index=True)
def assign_most_recent_classifier_set(self):
"""
Find the most recent classifier set and assign it to this workflow.
Returns:
            bool: whether a classifier set could be assigned to this AIGradingWorkflow
Raises:
DatabaseError
"""
classifier_set = AIClassifierSet.most_recent_classifier_set(
self.rubric, self.algorithm_id, self.course_id, self.item_id
)
if classifier_set is not None:
self.classifier_set = classifier_set
self.save()
return classifier_set is not None
@classmethod
@transaction.commit_on_success
def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
"""
Start a grading workflow.
Args:
submission_uuid (str): The UUID of the submission to grade.
rubric_dict (dict): The serialized rubric model.
algorithm_id (unicode): The ID of the algorithm to use for grading.
Returns:
AIGradingWorkflow
Raises:
SubmissionNotFoundError
SubmissionRequestError
SubmissionInternalError
InvalidRubric
DatabaseError
"""
# Retrieve info about the submission
submission = sub_api.get_submission_and_student(submission_uuid)
# Get or create the rubric
from openassessment.assessment.serializers import rubric_from_dict
rubric = rubric_from_dict(rubric_dict)
# Create the workflow
workflow = cls.objects.create(
submission_uuid=submission_uuid,
essay_text=essay_text_from_submission(submission),
algorithm_id=algorithm_id,
student_id=submission['student_item']['student_id'],
item_id=submission['student_item']['item_id'],
course_id=submission['student_item']['course_id'],
rubric=rubric
)
# Retrieve and assign classifier set candidates
workflow.assign_most_recent_classifier_set()
workflow._log_start_workflow()
return workflow
@transaction.commit_on_success
def complete(self, criterion_scores):
"""
Create an assessment with scores from the AI classifiers
and mark the workflow complete.
Args:
criterion_scores (dict): Dictionary mapping criteria names to integer scores.
Raises:
InvalidRubricSelection
DatabaseError
"""
self.assessment = Assessment.create(
self.rubric, self.algorithm_id, self.submission_uuid, AI_ASSESSMENT_TYPE
)
AssessmentPart.create_from_option_points(self.assessment, criterion_scores)
self.mark_complete_and_save()
|
agpl-3.0
|
TribeMedia/sky_engine
|
build/gyp_helper.py
|
77
|
2220
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file helps gyp_chromium and landmines correctly set up the gyp
# environment from chromium.gyp_env on disk
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.dirname(SCRIPT_DIR)
def apply_gyp_environment_from_file(file_path):
"""Reads in a *.gyp_env file and applies the valid keys to os.environ."""
if not os.path.exists(file_path):
return
with open(file_path, 'rU') as f:
file_contents = f.read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = (
'CC',
'CC_wrapper',
'CC.host_wrapper',
'CHROMIUM_GYP_FILE',
'CHROMIUM_GYP_SYNTAX_CHECK',
'CXX',
'CXX_wrapper',
'CXX.host_wrapper',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_CROSSCOMPILE',
'GYP_GENERATOR_OUTPUT',
'GYP_GENERATORS',
'GYP_INCLUDE_FIRST',
'GYP_INCLUDE_LAST',
'GYP_MSVS_VERSION',
)
for var in supported_vars:
file_val = file_data.get(var)
if file_val:
if var in os.environ:
behavior = 'replaces'
if var == 'GYP_DEFINES':
result = file_val + ' ' + os.environ[var]
behavior = 'merges with, and individual components override,'
else:
result = os.environ[var]
print 'INFO: Environment value for "%s" %s value in %s' % (
var, behavior, os.path.abspath(file_path)
)
string_padding = max(len(var), len(file_path), len('result'))
print ' %s: %s' % (var.rjust(string_padding), os.environ[var])
print ' %s: %s' % (file_path.rjust(string_padding), file_val)
os.environ[var] = result
else:
os.environ[var] = file_val
def apply_chromium_gyp_env():
if 'SKIP_CHROMIUM_GYP_ENV' not in os.environ:
# Update the environment based on chromium.gyp_env
path = os.path.join(os.path.dirname(CHROME_SRC), 'chromium.gyp_env')
apply_gyp_environment_from_file(path)
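# Illustrative sketch (not part of the original helper): chromium.gyp_env is a file
# containing a Python dict literal, e.g.
#   { 'GYP_DEFINES': 'component=shared_library', 'GYP_GENERATORS': 'ninja' }
# apply_chromium_gyp_env() merges supported keys into os.environ (for GYP_DEFINES the
# existing environment components override the file's) unless SKIP_CHROMIUM_GYP_ENV is set.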
|
bsd-3-clause
|
olivierdalang/QGIS
|
python/core/additions/validitycheck.py
|
51
|
3120
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
validitycheck.py
---------------------
Date : January 2019
Copyright : (C) 2019 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from qgis._core import (
QgsAbstractValidityCheck,
QgsApplication)
class CheckFactory:
"""
Constructs QgsAbstractValidityChecks using a decorator.
To use, Python based checks should use the decorator syntax:
.. highlight:: python
.. code-block:: python
@check.register(type=QgsAbstractValidityCheck.TypeLayoutCheck)
def my_layout_check(context, feedback):
results = ...
return results
"""
def __init__(self):
        # unfortunately the /Transfer/ annotation isn't working correctly on validityCheckRegistry().addCheck(),
        # so we need to manually store a reference to all checks we register
self.checks = []
def register(self, type, *args, **kwargs):
"""
Implements a decorator for registering Python based checks.
:param type: check type, e.g. QgsAbstractValidityCheck.TypeLayoutCheck
"""
def dec(f):
check = CheckWrapper(check_type=type, check_func=f)
self.checks.append(check)
QgsApplication.validityCheckRegistry().addCheck(check)
return dec
class CheckWrapper(QgsAbstractValidityCheck):
"""
Wrapper object used to create new validity checks from @check.
"""
def __init__(self, check_type, check_func):
"""
Initializer for CheckWrapper.
:param check_type: check type, e.g. QgsAbstractValidityCheck.TypeLayoutCheck
:param check_func: test function, should return a list of QgsValidityCheckResult results
"""
super().__init__()
self._check_type = check_type
self._results = []
self._check_func = check_func
def create(self):
return CheckWrapper(check_type=self._check_type, check_func=self._check_func)
def id(self):
return self._check_func.__name__
def checkType(self):
return self._check_type
def prepareCheck(self, context, feedback):
self._results = self._check_func(context, feedback)
if self._results is None:
self._results = []
return True
def runCheck(self, context, feedback):
return self._results
check = CheckFactory()
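# Illustrative sketch (not part of the original module) of registering a check with
# the module-level factory defined above; the check body is a placeholder and simply
# reports no issues:
#
#     @check.register(type=QgsAbstractValidityCheck.TypeLayoutCheck)
#     def my_empty_layout_check(context, feedback):
#         return []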
|
gpl-2.0
|
googleapis/python-asset
|
scripts/fixup_asset_v1p5beta1_keywords.py
|
2
|
6005
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
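# A small self-check (illustrative, not part of the generated script): partition()
# keeps the original order and returns (matching, non-matching).
assert partition(lambda x: x % 2 == 0, [1, 2, 3, 4]) == ([2, 4], [1, 3])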
class assetCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'list_assets': ('parent', 'read_time', 'asset_types', 'content_type', 'page_size', 'page_token', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=assetCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the asset client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
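    # Illustrative invocation (not part of the original script); the directory names
    # are placeholders, and the output directory must already exist and be empty:
    #   python fixup_asset_v1p5beta1_keywords.py -d ./old_code -o ./fixed_code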
|
apache-2.0
|
sgerhart/ansible
|
lib/ansible/modules/network/nxos/nxos_ntp_auth.py
|
61
|
9574
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
command = {
'command': command,
'output': 'json',
}
else:
command = {
'command': command,
'output': 'text',
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(command, module)[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
body = execute_show_command(command, module)[0]
try:
match_authentication = re.match(auth_regex, body, re.DOTALL)
group_authentication = match_authentication.groupdict()
authentication_key['key_id'] = group_authentication['key_id']
authentication_key['md5string'] = group_authentication['md5string']
if group_authentication['atype'] == '7':
authentication_key['auth_type'] = 'encrypt'
else:
authentication_key['auth_type'] = 'text'
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
if key_id and md5string:
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
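# Illustrative example (not part of the original module): set_ntp_auth_key('32', 'hello', 'text', 'true', 'on')
# would build ['ntp authentication-key 32 md5 hello 0', 'ntp trusted-key 32', 'ntp authenticate'].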
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
if key_id:
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication:
auth_remove_cmds.append(
'no ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(type='str'),
md5string=dict(type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
if key_id:
if not trusted_key and not md5string:
module.fail_json(msg='trusted_key or md5string MUST be specified')
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, delta.get('auth_type'),
delta.get('trusted_key'), delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
auth_toggle = None
if existing.get('authentication') == 'on':
auth_toggle = True
if not existing.get('key_id'):
key_id = None
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
|
mit
|
pyblish/pyblish-endpoint
|
pyblish_endpoint/vendor/werkzeug/contrib/cache.py
|
146
|
23519
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
    time a user requests a page, the webserver executes a lot of code, queries
    the database, and renders templates until the visitor gets the page they
    requested. This is a lot more expensive than just loading a file from the
    file system and sending it to the visitor.
    For most Web applications this overhead isn't a big deal, but once it
    becomes one, you will be glad to have a cache system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in you check if the current page
is already in the cache and if so, you're returning it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page, you don't have to cache the full thing)
Here is a simple example of how to cache a sidebar for a template::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you have access to it (either as a module global you can import or you just
put it into your WSGI application).
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import tempfile
from hashlib import md5
from time import time
try:
import cPickle as pickle
except ImportError:
import pickle
from werkzeug._compat import iteritems, string_types, text_type, \
integer_types, to_bytes
from werkzeug.posixemulation import rename
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, "iteritems"):
return mappingorseq.iteritems()
elif hasattr(mappingorseq, "items"):
return mappingorseq.items()
return mappingorseq
class BaseCache(object):
"""Baseclass for the cache systems. All the cache systems implement this
API or a superset of it.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`set`.
"""
def __init__(self, default_timeout=300):
self.default_timeout = default_timeout
def get(self, key):
"""Looks up key in the cache and returns the value for it.
If the key does not exist `None` is returned instead.
:param key: the key to be looked up.
"""
return None
def delete(self, key):
"""Deletes `key` from the cache. If it does not exist in the cache
nothing happens.
:param key: the key to delete.
"""
pass
def get_many(self, *keys):
"""Returns a list of values for the given keys.
        For each key an item in the list is created. Example::
foo, bar = cache.get_many("foo", "bar")
If a key can't be looked up `None` is returned for that key
instead.
:param keys: The function accepts multiple keys as positional
arguments.
"""
return map(self.get, keys)
def get_dict(self, *keys):
"""Works like :meth:`get_many` but returns a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
return dict(zip(keys, self.get_many(*keys)))
def set(self, key, value, timeout=None):
"""Adds a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
pass
def add(self, key, value, timeout=None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key or the default
timeout if not specified.
"""
pass
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
for key, value in _items(mapping):
self.set(key, value, timeout)
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
"""
pass
def inc(self, key, delta=1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
"""
self.set(key, (self.get(key) or 0) + delta)
def dec(self, key, delta=1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
"""
self.set(key, (self.get(key) or 0) - delta)
class NullCache(BaseCache):
"""A cache that doesn't cache. This can be useful for unit testing.
:param default_timeout: a dummy parameter that is ignored but exists
for API compatibility with other caches.
"""
class SimpleCache(BaseCache):
"""Simple memory cache for single process environments. This class exists
mainly for the development server and is not 100% thread safe. It tries
to use as many atomic operations as possible and no locks for simplicity
but it could happen under heavy load that keys are added multiple times.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
"""
def __init__(self, threshold=500, default_timeout=300):
BaseCache.__init__(self, default_timeout)
self._cache = {}
self.clear = self._cache.clear
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time()
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if expires <= now or idx % 3 == 0:
self._cache.pop(key, None)
def get(self, key):
expires, value = self._cache.get(key, (0, None))
if expires > time():
return pickle.loads(value)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
self._prune()
self._cache[key] = (time() + timeout, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if len(self._cache) > self._threshold:
self._prune()
item = (time() + timeout, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
self._cache.setdefault(key, item)
def delete(self, key):
self._cache.pop(key, None)
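# Illustrative sketch (not part of the original module) of the SimpleCache API shown above:
#
#     cache = SimpleCache(threshold=100, default_timeout=60)
#     cache.set('answer', 42)
#     cache.get('answer')      # -> 42 until the timeout elapses
#     cache.delete('answer')
#     cache.get('answer')      # -> None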
_test_memcached_key = re.compile(br'[^\x00-\x21\xff]{1,250}$').match
class MemcachedCache(BaseCache):
"""A cache that uses memcached as backend.
The first argument can either be an object that resembles the API of a
:class:`memcache.Client` or a tuple/list of server addresses. In the
event that a tuple/list is passed, Werkzeug tries to import the best
available memcache library.
Implementation notes: This cache backend works around some limitations in
memcached to simplify the interface. For example unicode keys are encoded
to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
the keys in the same format as passed. Furthermore all get methods
silently ignore key errors to not cause problems when untrusted user data
is passed to the get methods which is often the case in web applications.
:param servers: a list or tuple of server addresses or alternatively
a :class:`memcache.Client` or a compatible client.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param key_prefix: a prefix that is added before all keys. This makes it
possible to use the same memcached server for different
applications. Keep in mind that
:meth:`~BaseCache.clear` will also clear keys with a
different prefix.
"""
def __init__(self, servers=None, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if servers is None or isinstance(servers, (list, tuple)):
if servers is None:
servers = ['127.0.0.1:11211']
self._client = self.import_preferred_memcache_lib(servers)
if self._client is None:
raise RuntimeError('no memcache module found')
else:
# NOTE: servers is actually an already initialized memcache
# client.
self._client = servers
self.key_prefix = to_bytes(key_prefix)
def get(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
        # memcached doesn't support keys longer than that. Because such
        # over-long keys often occur when user-submitted data is tested,
        # we fail silently on get.
if _test_memcached_key(key):
return self._client.get(key)
def get_dict(self, *keys):
key_mapping = {}
have_encoded_keys = False
for key in keys:
if isinstance(key, unicode):
encoded_key = key.encode('utf-8')
have_encoded_keys = True
else:
encoded_key = key
if self.key_prefix:
encoded_key = self.key_prefix + encoded_key
if _test_memcached_key(key):
key_mapping[encoded_key] = key
d = rv = self._client.get_multi(key_mapping.keys())
if have_encoded_keys or self.key_prefix:
rv = {}
for key, value in iteritems(d):
rv[key_mapping[key]] = value
if len(rv) < len(keys):
for key in keys:
if key not in rv:
rv[key] = None
return rv
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.add(key, value, timeout)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.set(key, value, timeout)
def get_many(self, *keys):
d = self.get_dict(*keys)
return [d[key] for key in keys]
def set_many(self, mapping, timeout=None):
if timeout is None:
timeout = self.default_timeout
new_mapping = {}
for key, value in _items(mapping):
if isinstance(key, text_type):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
new_mapping[key] = value
self._client.set_multi(new_mapping, timeout)
def delete(self, key):
if isinstance(key, unicode):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
if _test_memcached_key(key):
self._client.delete(key)
def delete_many(self, *keys):
new_keys = []
for key in keys:
if isinstance(key, unicode):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
if _test_memcached_key(key):
new_keys.append(key)
self._client.delete_multi(new_keys)
def clear(self):
self._client.flush_all()
def inc(self, key, delta=1):
if isinstance(key, unicode):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.incr(key, delta)
def dec(self, key, delta=1):
if isinstance(key, unicode):
key = key.encode('utf-8')
if self.key_prefix:
key = self.key_prefix + key
self._client.decr(key, delta)
def import_preferred_memcache_lib(self, servers):
"""Returns an initialized memcache client. Used by the constructor."""
try:
import pylibmc
except ImportError:
pass
else:
return pylibmc.Client(servers)
try:
from google.appengine.api import memcache
except ImportError:
pass
else:
return memcache.Client()
try:
import memcache
except ImportError:
pass
else:
return memcache.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
class RedisCache(BaseCache):
"""Uses the Redis key-value store as a cache backend.
The first argument can be either a string denoting address of the Redis
server or an object resembling an instance of a redis.Redis class.
Note: Python Redis API already takes care of encoding unicode strings on
the fly.
.. versionadded:: 0.7
.. versionadded:: 0.8
`key_prefix` was added.
.. versionchanged:: 0.8
This cache backend now properly serializes objects.
.. versionchanged:: 0.8.3
This cache backend now supports password authentication.
:param host: address of the Redis server or an object which API is
compatible with the official Python Redis client (redis-py).
:param port: port number on which Redis server listens for connections.
:param password: password authentication for the Redis server.
:param db: db (zero-based numeric index) on Redis Server to connect.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param key_prefix: A prefix that should be added to all keys.
"""
def __init__(self, host='localhost', port=6379, password=None,
db=0, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if isinstance(host, string_types):
try:
import redis
except ImportError:
raise RuntimeError('no redis module found')
self._client = redis.Redis(host=host, port=port, password=password, db=db)
else:
self._client = host
self.key_prefix = key_prefix or ''
def dump_object(self, value):
"""Dumps an object into a string for redis. By default it serializes
        integers as a regular string and pickle dumps everything else.
"""
t = type(value)
if t in integer_types:
return str(value).encode('ascii')
return b'!' + pickle.dumps(value)
def load_object(self, value):
        """The reversal of :meth:`dump_object`. This might be called with
None.
"""
if value is None:
return None
if value.startswith(b'!'):
return pickle.loads(value[1:])
try:
return int(value)
except ValueError:
# before 0.8 we did not have serialization. Still support that.
return value
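    # Illustrative round trip (not part of the original backend): dump_object(42)
    # yields b'42', dump_object({'a': 1}) yields b'!' followed by pickled bytes, and
    # load_object() reverses both, returning raw values for pre-0.8 entries.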
def get(self, key):
return self.load_object(self._client.get(self.key_prefix + key))
def get_many(self, *keys):
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return [self.load_object(x) for x in self._client.mget(keys)]
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
dump = self.dump_object(value)
self._client.setex(self.key_prefix + key, dump, timeout)
def add(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
dump = self.dump_object(value)
added = self._client.setnx(self.key_prefix + key, dump)
if added:
self._client.expire(self.key_prefix + key, timeout)
def set_many(self, mapping, timeout=None):
if timeout is None:
timeout = self.default_timeout
pipe = self._client.pipeline()
for key, value in _items(mapping):
dump = self.dump_object(value)
pipe.setex(self.key_prefix + key, dump, timeout)
pipe.execute()
def delete(self, key):
self._client.delete(self.key_prefix + key)
def delete_many(self, *keys):
if not keys:
return
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
self._client.delete(*keys)
def clear(self):
if self.key_prefix:
keys = self._client.keys(self.key_prefix + '*')
if keys:
self._client.delete(*keys)
else:
self._client.flushdb()
def inc(self, key, delta=1):
return self._client.incr(self.key_prefix + key, delta)
def dec(self, key, delta=1):
return self._client.decr(self.key_prefix + key, delta)
class FileSystemCache(BaseCache):
"""A cache that stores the items on the file system. This cache depends
on being the only user of the `cache_dir`. Make absolutely sure that
nobody but this cache stores files there or otherwise the cache will
randomly delete files therein.
:param cache_dir: the directory where cache files are stored.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param mode: the file mode wanted for the cache files, default 0600
"""
#: used for temporary files by the FileSystemCache
_fs_transaction_suffix = '.__wz_cache'
def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
BaseCache.__init__(self, default_timeout)
self._path = cache_dir
self._threshold = threshold
self._mode = mode
if not os.path.exists(self._path):
os.makedirs(self._path)
def _list_dir(self):
"""return a list of (fully qualified) cache filenames
"""
return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
if not fn.endswith(self._fs_transaction_suffix)]
def _prune(self):
entries = self._list_dir()
if len(entries) > self._threshold:
now = time()
for idx, fname in enumerate(entries):
remove = False
f = None
try:
try:
f = open(fname, 'rb')
expires = pickle.load(f)
remove = expires <= now or idx % 3 == 0
finally:
if f is not None:
f.close()
except Exception:
pass
if remove:
try:
os.remove(fname)
except (IOError, OSError):
pass
def clear(self):
for fname in self._list_dir():
try:
os.remove(fname)
except (IOError, OSError):
pass
def _get_filename(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8') #XXX unicode review
hash = md5(key).hexdigest()
return os.path.join(self._path, hash)
def get(self, key):
filename = self._get_filename(key)
try:
f = open(filename, 'rb')
try:
if pickle.load(f) >= time():
return pickle.load(f)
finally:
f.close()
os.remove(filename)
except Exception:
return None
def add(self, key, value, timeout=None):
filename = self._get_filename(key)
if not os.path.exists(filename):
self.set(key, value, timeout)
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
filename = self._get_filename(key)
self._prune()
try:
fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
dir=self._path)
f = os.fdopen(fd, 'wb')
try:
pickle.dump(int(time() + timeout), f, 1)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
finally:
f.close()
rename(tmp, filename)
os.chmod(filename, self._mode)
except (IOError, OSError):
pass
def delete(self, key):
try:
os.remove(self._get_filename(key))
except (IOError, OSError):
pass
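# A minimal usage sketch (an illustration, assuming a writable './cache'
# directory) of how FileSystemCache lays values out on disk: one md5-named
# pickle file per key, pruned once more than `threshold` entries accumulate.
#
#     cache = FileSystemCache('./cache', threshold=500, default_timeout=300)
#     cache.set('token', 'abc123')     # writes <md5(key)>: pickled expiry, then pickled value
#     cache.get('token')               # -> 'abc123' while the entry is fresh
#     cache.delete('token')            # removes the underlying file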
|
lgpl-3.0
|
lzw120/django
|
django/contrib/sessions/backends/cache.py
|
89
|
2261
|
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import cache
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = cache
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError("Unable to create a new session key.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return (KEY_PREFIX + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
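# A minimal sketch (assuming a configured Django cache backend) of how the
# save(must_create=True) path above detects key collisions: cache.add() only
# succeeds for a missing key, so a falsy result means the key already existed.
#
#     store = SessionStore()
#     store['user_id'] = 42
#     store.save(must_create=True)   # uses cache.add(); raises CreateError on collision
#     store.save()                   # subsequent saves go through cache.set()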
|
bsd-3-clause
|
hetland/xray
|
xray/test/test_utils.py
|
2
|
4438
|
import numpy as np
import pandas as pd
from xray.core import ops, utils
from xray.core.pycompat import OrderedDict
from . import TestCase
class TestSafeCastToIndex(TestCase):
def test(self):
dates = pd.date_range('2000-01-01', periods=10)
x = np.arange(5)
td = x * np.timedelta64(1, 'D')
for expected, array in [
(dates, dates.values),
(pd.Index(x, dtype=object), x.astype(object)),
(pd.Index(td), td),
(pd.Index(td, dtype=object), td.astype(object)),
]:
actual = utils.safe_cast_to_index(array)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
class TestArrayEquiv(TestCase):
def test_0d(self):
# verify our workaround for pd.isnull not working for 0-dimensional
# object arrays
self.assertTrue(ops.array_equiv(0, np.array(0, dtype=object)))
self.assertTrue(
ops.array_equiv(np.nan, np.array(np.nan, dtype=object)))
self.assertFalse(
ops.array_equiv(0, np.array(1, dtype=object)))
class TestDictionaries(TestCase):
def setUp(self):
self.x = {'a': 'A', 'b': 'B'}
self.y = {'c': 'C', 'b': 'B'}
self.z = {'a': 'Z'}
def test_equivalent(self):
self.assertTrue(utils.equivalent(0, 0))
self.assertTrue(utils.equivalent(np.nan, np.nan))
self.assertTrue(utils.equivalent(0, np.array(0.0)))
self.assertTrue(utils.equivalent([0], np.array([0])))
self.assertTrue(utils.equivalent(np.array([0]), [0]))
self.assertTrue(utils.equivalent(np.arange(3), 1.0 * np.arange(3)))
self.assertFalse(utils.equivalent(0, np.zeros(3)))
def test_safe(self):
# should not raise exception:
utils.update_safety_check(self.x, self.y)
def test_unsafe(self):
with self.assertRaises(ValueError):
utils.update_safety_check(self.x, self.z)
def test_ordered_dict_intersection(self):
self.assertEqual({'b': 'B'},
utils.ordered_dict_intersection(self.x, self.y))
self.assertEqual({}, utils.ordered_dict_intersection(self.x, self.z))
def test_dict_equiv(self):
x = OrderedDict()
x['a'] = 3
x['b'] = np.array([1, 2, 3])
y = OrderedDict()
y['b'] = np.array([1.0, 2.0, 3.0])
y['a'] = 3
self.assertTrue(utils.dict_equiv(x, y)) # two nparrays are equal
y['b'] = [1, 2, 3] # np.array not the same as a list
self.assertTrue(utils.dict_equiv(x, y)) # nparray == list
x['b'] = [1.0, 2.0, 3.0]
self.assertTrue(utils.dict_equiv(x, y)) # list vs. list
x['c'] = None
self.assertFalse(utils.dict_equiv(x, y)) # new key in x
x['c'] = np.nan
y['c'] = np.nan
self.assertTrue(utils.dict_equiv(x, y)) # as intended, nan is nan
x['c'] = np.inf
y['c'] = np.inf
self.assertTrue(utils.dict_equiv(x, y)) # inf == inf
y = dict(y)
self.assertTrue(utils.dict_equiv(x, y)) # different dictionary types are fine
y['b'] = 3 * np.arange(3)
self.assertFalse(utils.dict_equiv(x, y)) # not equal when arrays differ
def test_frozen(self):
x = utils.Frozen(self.x)
with self.assertRaises(TypeError):
x['foo'] = 'bar'
with self.assertRaises(TypeError):
del x['a']
with self.assertRaises(AttributeError):
x.update(self.y)
self.assertEqual(x.mapping, self.x)
self.assertIn(repr(x), ("Frozen({'a': 'A', 'b': 'B'})",
"Frozen({'b': 'B', 'a': 'A'})"))
def test_sorted_keys_dict(self):
x = {'a': 1, 'b': 2, 'c': 3}
y = utils.SortedKeysDict(x)
self.assertItemsEqual(y, ['a', 'b', 'c'])
self.assertEqual(repr(utils.SortedKeysDict()),
"SortedKeysDict({})")
def test_chain_map(self):
m = utils.ChainMap({'x': 0, 'y': 1}, {'x': -100, 'z': 2})
self.assertIn('x', m)
self.assertIn('y', m)
self.assertIn('z', m)
self.assertEqual(m['x'], 0)
self.assertEqual(m['y'], 1)
self.assertEqual(m['z'], 2)
m['x'] = 100
self.assertEqual(m['x'], 100)
self.assertEqual(m.maps[0]['x'], 100)
self.assertItemsEqual(['x', 'y', 'z'], m)
|
apache-2.0
|
varunarya10/nova_test_latest
|
nova/api/openstack/compute/schemas/v3/volumes.py
|
51
|
2842
|
# Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'volume': {
'type': 'object',
'properties': {
'volume_type': {'type': 'string'},
'metadata': {'type': 'object'},
'snapshot_id': {'type': 'string'},
'size': {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
'minimum': 1
},
'availability_zone': {'type': 'string'},
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'additionalProperties': False,
},
},
'required': ['volume'],
'additionalProperties': False,
}
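# An illustrative request body (not taken from the original file) that
# satisfies the `create` schema above; note that `size` may be an integer or a
# digit-only string because of the ['integer', 'string'] type and the
# '^[0-9]+$' pattern:
#
#     {"volume": {"size": "10",
#                 "display_name": "data-disk",
#                 "availability_zone": "nova"}}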
snapshot_create = {
'type': 'object',
'properties': {
'snapshot': {
'type': 'object',
'properties': {
'volume_id': {'type': 'string'},
'force': parameter_types.boolean,
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'required': ['volume_id'],
'additionalProperties': False,
},
},
'required': ['snapshot'],
'additionalProperties': False,
}
create_volume_attachment = {
'type': 'object',
'properties': {
'volumeAttachment': {
'type': 'object',
'properties': {
'volumeId': parameter_types.volume_id,
'device': {
'type': 'string',
# NOTE: The validation pattern from match_device() in
# nova/block_device.py.
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$'
}
},
'required': ['volumeId'],
'additionalProperties': False,
},
},
'required': ['volumeAttachment'],
'additionalProperties': False,
}
update_volume_attachment = copy.deepcopy(create_volume_attachment)
del update_volume_attachment['properties']['volumeAttachment'][
'properties']['device']
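# Illustrative bodies (not from the original file): create_volume_attachment
# accepts an optional `device` matching the block-device pattern above, while
# the derived update_volume_attachment schema drops that property entirely:
#
#     {"volumeAttachment": {"volumeId": "<uuid>", "device": "/dev/vdb"}}   # create
#     {"volumeAttachment": {"volumeId": "<uuid>"}}                         # update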
|
apache-2.0
|
DavidGuben/rcbplayspokemon
|
app/pywin32-220/com/win32com/storagecon.py
|
25
|
3082
|
"""Constants related to IStorage and related interfaces
This file was generated by h2py from d:\msdev\include\objbase.h
then hand edited, a few extra constants added, etc.
"""
STGC_DEFAULT = 0
STGC_OVERWRITE = 1
STGC_ONLYIFCURRENT = 2
STGC_DANGEROUSLYCOMMITMERELYTODISKCACHE = 4
STGC_CONSOLIDATE = 8
STGTY_STORAGE = 1
STGTY_STREAM = 2
STGTY_LOCKBYTES = 3
STGTY_PROPERTY = 4
STREAM_SEEK_SET = 0
STREAM_SEEK_CUR = 1
STREAM_SEEK_END = 2
LOCK_WRITE = 1
LOCK_EXCLUSIVE = 2
LOCK_ONLYONCE = 4
# Generated from here on.
CWCSTORAGENAME = 32
STGM_DIRECT = 0x00000000
STGM_TRANSACTED = 0x00010000
STGM_SIMPLE = 0x08000000
STGM_READ = 0x00000000
STGM_WRITE = 0x00000001
STGM_READWRITE = 0x00000002
STGM_SHARE_DENY_NONE = 0x00000040
STGM_SHARE_DENY_READ = 0x00000030
STGM_SHARE_DENY_WRITE = 0x00000020
STGM_SHARE_EXCLUSIVE = 0x00000010
STGM_PRIORITY = 0x00040000
STGM_DELETEONRELEASE = 0x04000000
STGM_NOSCRATCH = 0x00100000
STGM_CREATE = 0x00001000
STGM_CONVERT = 0x00020000
STGM_FAILIFTHERE = 0x00000000
STGM_NOSNAPSHOT = 0x00200000
ASYNC_MODE_COMPATIBILITY = 0x00000001
ASYNC_MODE_DEFAULT = 0x00000000
STGTY_REPEAT = 0x00000100
STG_TOEND = 0xFFFFFFFF
STG_LAYOUT_SEQUENTIAL = 0x00000000
STG_LAYOUT_INTERLEAVED = 0x00000001
## access rights used with COM server ACL's
COM_RIGHTS_EXECUTE = 1
COM_RIGHTS_EXECUTE_LOCAL = 2
COM_RIGHTS_EXECUTE_REMOTE = 4
COM_RIGHTS_ACTIVATE_LOCAL = 8
COM_RIGHTS_ACTIVATE_REMOTE = 16
STGFMT_DOCUMENT = 0
STGFMT_STORAGE = 0
STGFMT_NATIVE = 1
STGFMT_FILE = 3
STGFMT_ANY = 4
STGFMT_DOCFILE = 5
PID_DICTIONARY = 0
PID_CODEPAGE = 1
PID_FIRST_USABLE = 2
PID_FIRST_NAME_DEFAULT = 4095
PID_LOCALE = -2147483648
PID_MODIFY_TIME = -2147483647
PID_SECURITY = -2147483646
PID_BEHAVIOR = -2147483645
PID_ILLEGAL = -1
PID_MIN_READONLY = -2147483648
PID_MAX_READONLY = -1073741825
## DiscardableInformation
PIDDI_THUMBNAIL = 0x00000002
## SummaryInformation
PIDSI_TITLE = 2
PIDSI_SUBJECT = 3
PIDSI_AUTHOR = 4
PIDSI_KEYWORDS = 5
PIDSI_COMMENTS = 6
PIDSI_TEMPLATE = 7
PIDSI_LASTAUTHOR = 8
PIDSI_REVNUMBER = 9
PIDSI_EDITTIME = 10
PIDSI_LASTPRINTED = 11
PIDSI_CREATE_DTM = 12
PIDSI_LASTSAVE_DTM = 13
PIDSI_PAGECOUNT = 14
PIDSI_WORDCOUNT = 15
PIDSI_CHARCOUNT = 16
PIDSI_THUMBNAIL = 17
PIDSI_APPNAME = 18
PIDSI_DOC_SECURITY = 19
## DocSummaryInformation
PIDDSI_CATEGORY = 2
PIDDSI_PRESFORMAT = 3
PIDDSI_BYTECOUNT = 4
PIDDSI_LINECOUNT = 5
PIDDSI_PARCOUNT = 6
PIDDSI_SLIDECOUNT = 7
PIDDSI_NOTECOUNT = 8
PIDDSI_HIDDENCOUNT = 9
PIDDSI_MMCLIPCOUNT = 10
PIDDSI_SCALE = 11
PIDDSI_HEADINGPAIR = 12
PIDDSI_DOCPARTS = 13
PIDDSI_MANAGER = 14
PIDDSI_COMPANY = 15
PIDDSI_LINKSDIRTY = 16
## MediaFileSummaryInfo
PIDMSI_EDITOR = 2
PIDMSI_SUPPLIER = 3
PIDMSI_SOURCE = 4
PIDMSI_SEQUENCE_NO = 5
PIDMSI_PROJECT = 6
PIDMSI_STATUS = 7
PIDMSI_OWNER = 8
PIDMSI_RATING = 9
PIDMSI_PRODUCTION = 10
PIDMSI_COPYRIGHT = 11
## PROPSETFLAG enum
PROPSETFLAG_DEFAULT = 0
PROPSETFLAG_NONSIMPLE = 1
PROPSETFLAG_ANSI = 2
PROPSETFLAG_UNBUFFERED = 4
PROPSETFLAG_CASE_SENSITIVE = 8
## STGMOVE enum
STGMOVE_MOVE = 0
STGMOVE_COPY = 1
STGMOVE_SHALLOWCOPY = 2
|
mit
|
gangadharkadam/letzerp
|
erpnext/hr/doctype/salary_slip/test_salary_slip.py
|
3
|
3968
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import unittest
import frappe
from frappe.utils import today
from erpnext.hr.doctype.employee.employee import make_salary_structure
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
class TestSalarySlip(unittest.TestCase):
def setUp(self):
frappe.db.sql("""delete from `tabLeave Application`""")
frappe.db.sql("""delete from `tabSalary Slip`""")
from erpnext.hr.doctype.leave_application.test_leave_application import _test_records as leave_applications
la = frappe.copy_doc(leave_applications[2])
la.insert()
la.status = "Approved"
la.submit()
def tearDown(self):
frappe.db.set_value("HR Settings", "HR Settings", "include_holidays_in_total_working_days", 0)
frappe.set_user("Administrator")
def test_salary_slip_with_holidays_included(self):
frappe.db.set_value("HR Settings", "HR Settings", "include_holidays_in_total_working_days", 1)
ss = frappe.copy_doc(test_records[0])
ss.insert()
self.assertEquals(ss.total_days_in_month, 31)
self.assertEquals(ss.payment_days, 30)
self.assertEquals(ss.earnings[0].e_modified_amount, 14516.13)
self.assertEquals(ss.earnings[1].e_modified_amount, 500)
self.assertEquals(ss.deductions[0].d_modified_amount, 100)
self.assertEquals(ss.deductions[1].d_modified_amount, 48.39)
self.assertEquals(ss.gross_pay, 15016.13)
self.assertEquals(ss.net_pay, 14867.74)
def test_salary_slip_with_holidays_excluded(self):
ss = frappe.copy_doc(test_records[0])
ss.insert()
self.assertEquals(ss.total_days_in_month, 30)
self.assertEquals(ss.payment_days, 29)
self.assertEquals(ss.earnings[0].e_modified_amount, 14500)
self.assertEquals(ss.earnings[1].e_modified_amount, 500)
self.assertEquals(ss.deductions[0].d_modified_amount, 100)
self.assertEquals(ss.deductions[1].d_modified_amount, 48.33)
self.assertEquals(ss.gross_pay, 15000)
self.assertEquals(ss.net_pay, 14851.67)
def test_employee_salary_slip_read_permission(self):
self.make_employee("[email protected]")
self.make_employee("[email protected]")
salary_slip_test_employee = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("[email protected]"))
salary_slip_test_employee_2 = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("[email protected]"))
frappe.set_user("[email protected]")
self.assertTrue(salary_slip_test_employee.has_permission("read"))
def make_employee(self, user):
if not frappe.db.get_value("User", user):
frappe.get_doc({
"doctype": "User",
"email": user,
"first_name": user,
"new_password": "password",
"user_roles": [{"doctype": "UserRole", "role": "Employee"}]
}).insert()
if not frappe.db.get_value("Employee", {"user_id": user}):
frappe.get_doc({
"doctype": "Employee",
"naming_series": "_T-Employee-",
"employee_name": user,
"user_id": user,
"company": "_Test Company",
"date_of_birth": "1990-05-08",
"date_of_joining": "2013-01-01",
"department": "_Test Department 1",
"gender": "Female",
"status": "Active"
}).insert()
def make_employee_salary_slip(self, user):
employee = frappe.db.get_value("Employee", {"user_id": user})
salary_structure = frappe.db.get_value("Salary Structure", {"employee": employee})
if not salary_structure:
salary_structure = make_salary_structure(employee)
salary_structure.from_date = today()
salary_structure.insert()
salary_structure = salary_structure.name
salary_slip = frappe.db.get_value("Salary Slip", {"employee": employee})
if not salary_slip:
salary_slip = make_salary_slip(salary_structure)
salary_slip.insert()
salary_slip.submit()
salary_slip = salary_slip.name
return salary_slip
test_dependencies = ["Leave Application"]
test_records = frappe.get_test_records('Salary Slip')
|
agpl-3.0
|
BorisJeremic/Real-ESSI-Examples
|
analytic_solution/test_cases/4NodeANDES/cantilever_irregular_element_with_divisions/shape3/NumberOfDivision4/horizontal_load/compare_txt.py
|
637
|
2094
|
#!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
|
cc0-1.0
|
docker-infra/ansible-modules-core
|
files/template.py
|
8
|
2670
|
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644
# The same example, but using symbolic modes equivalent to 0644
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
'''
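# A small illustrative Jinja2 snippet (not part of the module) using the
# variables described in DOCUMENTATION above; ansible_managed is the only one
# whose content is configurable (via the defaults section of ansible.cfg):
#
#     # {{ ansible_managed }}
#     # rendered on {{ template_run_date }} from {{ template_fullpath }}
#     # by uid {{ template_uid }} on {{ template_host }}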
|
gpl-3.0
|
kamyu104/django
|
tests/m2m_signals/tests.py
|
271
|
15982
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
""" Install a listener on the two m2m relations. """
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
if add_default_parts_before_set_signal:
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_remove_relation(self):
self._initialize_signal_car()
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}, {
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}
])
def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
expected_messages = []
self._initialize_signal_car()
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
self._initialize_signal_car()
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}, {
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}
])
def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
self._initialize_signal_car()
# clear all parts of the self.vw
self.vw.default_parts.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
}, {
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
}
])
def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
self._initialize_signal_car()
# take all the doors off of cars
self.doors.car_set.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_reverse_relation(self):
self._initialize_signal_car()
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts = [self.wheelset, self.doors, self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# set by clearing.
self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# set by only removing what's necessary.
self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
# Install a listener on the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_with_self_add_friends(self):
self._initialize_signal_person()
self.alice.friends = [self.bob, self.chuck]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}
])
def test_m2m_relations_with_self_add_fan(self):
self._initialize_signal_person()
self.alice.fans = [self.daisy]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}
])
def test_m2m_relations_with_self_add_idols(self):
self._initialize_signal_person()
self.chuck.idols = [self.alice, self.bob]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}, {
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}
])
|
bsd-3-clause
|
cobrab11/blacksmith-2
|
expansions/muc/code.py
|
3
|
11539
|
# coding: utf-8
# BlackSmith mark.2
# exp_name = "muc" # /code.py v.x9
# Id: 05~5c
# Code © (2009-2012) by WitcherGeralt [[email protected]]
class expansion_temp(expansion):
def __init__(self, name):
expansion.__init__(self, name)
def command_subject(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
Chat = Chats[source[1]]
if Chat.isModer or getattr(Chat.get_user(Chat.nick), "role", (None,)*2)[1] == aRoles[9]:
Chat.subject(body)
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
sep = chr(47)
def command_ban(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if Chats[source[1]].isModer:
if enough_access(source[1], source[2], 6) or getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5],))[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid and not enough_access(jid, None, 7) and jid != get_disp(disp):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].outcast(jid, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_none(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if Chats[source[1]].isModer:
if enough_access(source[1], source[2], 6) or getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5],))[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid:
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].none(jid, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_member(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if Chats[source[1]].isModer:
if enough_access(source[1], source[2], 6) or getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5],))[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid:
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].member(jid, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_admin(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5],))[0] == aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid:
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].admin(jid, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_owner(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5],))[0] == aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid:
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].owner(jid, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_kick(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
aRole = getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5], None))
if Chats[source[1]].isModer or aRole[1] == aRoles[9]:
if enough_access(source[1], source[2], 6) or aRole[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
else:
jid, nick = None, None
if nick and jid and not enough_access(jid, None, 7) and jid != get_disp(disp):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].kick(nick, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_visitor(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
aRole = getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5], None))
if Chats[source[1]].isModer or aRole[1] == aRoles[9]:
if enough_access(source[1], source[2], 6) or aRole[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
else:
jid, nick = None, None
if nick and jid and not enough_access(jid, None, 7) and jid != get_disp(disp):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].visitor(nick, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_participant(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
aRole = getattr(Chats[source[1]].get_user(get_nick(source[1])), "role", (aRoles[5], None))
if Chats[source[1]].isModer or aRole[1] == aRoles[9]:
if enough_access(source[1], source[2], 6) or aRole[0] != aRoles[5]:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].participant(nick, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[0]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
def command_moder(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
if Chats[source[1]].isModer:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
Chats[source[1]].moder(nick, body, (None, (stype, source)))
else:
answer = AnsBase[7]
else:
answer = self.AnsBase[1]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
if locals().has_key(sBase[6]):
Answer(answer, stype, source, disp)
PerfDesc = {"done": 0, "fail": 0}
def HandleFB(self, disp, stanza, desc):
if xmpp.isResultNode(stanza):
desc["done"] += 1
else:
desc["fail"] += 1
def calcPerformance(self, desc):
cl = len(Chats.keys())
for x in xrange(60):
sleep(0.2)
if cl <= sum(desc.values()):
break
sl = sum(desc.values())
if cl > sl:
desc["none"] = (cl - sl)
answer = self.AnsBase[2] % desc
elif desc["fail"]:
answer = self.AnsBase[3] % desc
else:
answer = self.AnsBase[4]
return answer
def command_fullban(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if nick and jid and not enough_access(jid, None, 7) and jid != get_disp(disp):
if body:
body = "%s: %s" % (source[2], body[0].strip())
else:
body = "%s/%s" % (get_nick(source[1]), source[2])
desc = self.PerfDesc.copy()
for conf in Chats.itervalues():
conf.outcast(jid, body, (self.HandleFB, {"desc": desc}))
answer = self.calcPerformance(desc)
else:
answer = AnsBase[7]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
Answer(answer, stype, source, disp)
def command_fullunban(self, stype, source, body, disp):
if Chats.has_key(source[1]):
if body:
body = body.split(self.sep, 1)
nick = (body.pop(0)).strip()
if Chats[source[1]].isHere(nick):
jid = get_source(source[1], nick)
elif nick.count(chr(46)):
jid = nick
else:
jid = None
if jid:
desc = self.PerfDesc.copy()
for conf in Chats.itervalues():
conf.none(jid, handler = (self.HandleFB, {"desc": desc}))
answer = self.calcPerformance(desc)
else:
answer = AnsBase[7]
else:
answer = AnsBase[1]
else:
answer = AnsBase[0]
Answer(answer, stype, source, disp)
commands = (
(command_subject, "subject", 3,),
(command_ban, "ban", 5,),
(command_none, "none", 5,),
(command_member, "member", 5,),
(command_admin, "admin", 6,),
(command_owner, "owner", 6,),
(command_kick, "kick", 3,),
(command_visitor, "visitor", 3,),
(command_participant, "participant", 3,),
(command_moder, "moder", 5,),
(command_fullban, "fullban", 7,),
(command_fullunban, "fullunban", 7,)
)
|
apache-2.0
|
nhtera/github.io
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_scilab_builtins.py
|
364
|
31261
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# These lists are generated automatically.
# Run the following in a Scilab script:
#
# varType=["functions", "commands", "macros", "variables" ];
# fd = mopen('list.txt','wt');
#
# for j=1:size(varType,"*")
# myStr="";
# a=completion("",varType(j));
# myStr=varType(j)+"_kw = [";
# for i=1:size(a,"*")
# myStr = myStr + """" + a(i) + """";
# if size(a,"*") <> i then
# myStr = myStr + ","; end
# end
# myStr = myStr + "]";
# mputl(myStr,fd);
# end
# mclose(fd);
#
# Then replace "$" by "\\$" manually.
functions_kw = ["%XMLAttr_6","%XMLAttr_e","%XMLAttr_i_XMLElem","%XMLAttr_length","%XMLAttr_p","%XMLAttr_size","%XMLDoc_6","%XMLDoc_e","%XMLDoc_i_XMLList","%XMLDoc_p","%XMLElem_6","%XMLElem_e","%XMLElem_i_XMLDoc","%XMLElem_i_XMLElem","%XMLElem_i_XMLList","%XMLElem_p","%XMLList_6","%XMLList_e","%XMLList_i_XMLElem","%XMLList_i_XMLList","%XMLList_length","%XMLList_p","%XMLList_size","%XMLNs_6","%XMLNs_e","%XMLNs_i_XMLElem","%XMLNs_p","%XMLSet_6","%XMLSet_e","%XMLSet_length","%XMLSet_p","%XMLSet_size","%XMLValid_p","%b_i_XMLList","%c_i_XMLAttr","%c_i_XMLDoc","%c_i_XMLElem","%c_i_XMLList","%ce_i_XMLList","%fptr_i_XMLList","%h_i_XMLList","%hm_i_XMLList","%i_abs","%i_cumprod","%i_cumsum","%i_diag","%i_i_XMLList","%i_matrix","%i_max","%i_maxi","%i_min","%i_mini","%i_mput","%i_p","%i_prod","%i_sum","%i_tril","%i_triu","%ip_i_XMLList","%l_i_XMLList","%lss_i_XMLList","%mc_i_XMLList","%msp_full","%msp_i_XMLList","%msp_spget","%p_i_XMLList","%ptr_i_XMLList","%r_i_XMLList","%s_i_XMLList","%sp_i_XMLList","%spb_i_XMLList","%st_i_XMLList","Calendar","ClipBoard","Matplot","Matplot1","PlaySound","TCL_DeleteInterp","TCL_DoOneEvent","TCL_EvalFile","TCL_EvalStr","TCL_ExistArray","TCL_ExistInterp","TCL_ExistVar","TCL_GetVar","TCL_GetVersion","TCL_SetVar","TCL_UnsetVar","TCL_UpVar","_","_code2str","_str2code","about","abs","acos","addcb","addf","addhistory","addinter","amell","and","argn","arl2_ius","ascii","asin","atan","backslash","balanc","banner","base2dec","basename","bdiag","beep","besselh","besseli","besselj","besselk","bessely","beta","bezout","bfinit","blkfc1i","blkslvi","bool2s","browsehistory","browsevar","bsplin3val","buildDocv2","buildouttb","bvode","c_link","calerf","call","callblk","captions","cd","cdfbet","cdfbin","cdfchi","cdfchn","cdff","cdffnc","cdfgam","cdfnbn","cdfnor","cdfpoi","cdft","ceil","champ","champ1","chdir","chol","clc","clean","clear","clear_pixmap","clearfun","clearglobal","closeEditor","closeXcos","code2str","coeff","comp","completion","conj","contour2di","contr","conv2","convstr","copy","copyfile","corr","cos","coserror","createdir","cshep2d","ctree2","ctree3","ctree4","cumprod","cumsum","curblock","curblockc","dasrt","dassl","data2sig","debug","dec2base","deff","definedfields","degree","delbpt","delete","deletefile","delip","delmenu","det","dgettext","dhinf","diag","diary","diffobjs","disp","dispbpt","displayhistory","disposefftwlibrary","dlgamma","dnaupd","dneupd","double","draw","drawaxis","drawlater","drawnow","dsaupd","dsearch","dseupd","duplicate","editor","editvar","emptystr","end_scicosim","ereduc","errcatch","errclear","error","eval_cshep2d","exec","execstr","exists","exit","exp","expm","exportUI","export_to_hdf5","eye","fadj2sp","fec","feval","fft","fftw","fftw_flags","fftw_forget_wisdom","fftwlibraryisloaded","file","filebrowser","fileext","fileinfo","fileparts","filesep","find","findBD","findfiles","floor","format","fort","fprintfMat","freq","frexp","fromc","fromjava","fscanfMat","fsolve","fstair","full","fullpath","funcprot","funptr","gamma","gammaln","geom3d","get","get_absolute_file_path","get_fftw_wisdom","getblocklabel","getcallbackobject","getdate","getdebuginfo","getdefaultlanguage","getdrives","getdynlibext","getenv","getfield","gethistory","gethistoryfile","getinstalledlookandfeels","getio","getlanguage","getlongpathname","getlookandfeel","getmd5","getmemory","getmodules","getos","getpid","getrelativefilename","getscicosvars","getscilabmode","getshortpathname","gettext","getvariablesonstack","getversion","glist","global","glue","grand","grayplot","grep","gsort"
,"gstacksize","havewindow","helpbrowser","hess","hinf","historymanager","historysize","host","iconvert","iconvert","ieee","ilib_verbose","imag","impl","import_from_hdf5","imult","inpnvi","int","int16","int2d","int32","int3d","int8","interp","interp2d","interp3d","intg","intppty","inttype","inv","is_handle_valid","isalphanum","isascii","isdef","isdigit","isdir","isequal","isequalbitwise","iserror","isfile","isglobal","isletter","isreal","iswaitingforinput","javaclasspath","javalibrarypath","kron","lasterror","ldiv","ldivf","legendre","length","lib","librarieslist","libraryinfo","linear_interpn","lines","link","linmeq","list","load","loadScicos","loadfftwlibrary","loadhistory","log","log1p","lsq","lsq_splin","lsqrsolve","lsslist","lstcat","lstsize","ltitr","lu","ludel","lufact","luget","lusolve","macr2lst","macr2tree","matfile_close","matfile_listvar","matfile_open","matfile_varreadnext","matfile_varwrite","matrix","max","maxfiles","mclearerr","mclose","meof","merror","messagebox","mfprintf","mfscanf","mget","mgeti","mgetl","mgetstr","min","mlist","mode","model2blk","mopen","move","movefile","mprintf","mput","mputl","mputstr","mscanf","mseek","msprintf","msscanf","mtell","mtlb_mode","mtlb_sparse","mucomp","mulf","nearfloat","newaxes","newest","newfun","nnz","notify","number_properties","ode","odedc","ones","opentk","optim","or","ordmmd","parallel_concurrency","parallel_run","param3d","param3d1","part","pathconvert","pathsep","phase_simulation","plot2d","plot2d1","plot2d2","plot2d3","plot2d4","plot3d","plot3d1","pointer_xproperty","poly","ppol","pppdiv","predef","print","printf","printfigure","printsetupbox","prod","progressionbar","prompt","pwd","qld","qp_solve","qr","raise_window","rand","rankqr","rat","rcond","rdivf","read","read4b","readb","readgateway","readmps","real","realtime","realtimeinit","regexp","relocate_handle","remez","removedir","removelinehistory","res_with_prec","resethistory","residu","resume","return","ricc","ricc_old","rlist","roots","rotate_axes","round","rpem","rtitr","rubberbox","save","saveafterncommands","saveconsecutivecommands","savehistory","schur","sci_haltscicos","sci_tree2","sci_tree3","sci_tree4","sciargs","scicos_debug","scicos_debug_count","scicos_time","scicosim","scinotes","sctree","semidef","set","set_blockerror","set_fftw_wisdom","set_xproperty","setbpt","setdefaultlanguage","setenv","setfield","sethistoryfile","setlanguage","setlookandfeel","setmenu","sfact","sfinit","show_pixmap","show_window","showalluimenushandles","sident","sig2data","sign","simp","simp_mode","sin","size","slash","sleep","sorder","sparse","spchol","spcompack","spec","spget","splin","splin2d","splin3d","spones","sprintf","sqrt","stacksize","str2code","strcat","strchr","strcmp","strcspn","strindex","string","stringbox","stripblanks","strncpy","strrchr","strrev","strsplit","strspn","strstr","strsubst","strtod","strtok","subf","sum","svd","swap_handles","symfcti","syredi","system_getproperty","system_setproperty","ta2lpd","tan","taucs_chdel","taucs_chfact","taucs_chget","taucs_chinfo","taucs_chsolve","tempname","testmatrix","timer","tlist","tohome","tokens","toolbar","toprint","tr_zer","tril","triu","type","typename","uiDisplayTree","uicontextmenu","uicontrol","uigetcolor","uigetdir","uigetfile","uigetfont","uimenu","uint16","uint32","uint8","uipopup","uiputfile","uiwait","ulink","umf_ludel","umf_lufact","umf_luget","umf_luinfo","umf_lusolve","umfpack","unglue","unix","unsetmenu","unzoom","updatebrowsevar","usecanvas","user","var2vec","varn","vec2var","waitbar","warnBlockByUID","warning
","what","where","whereis","who","winsid","with_embedded_jre","with_module","writb","write","write4b","x_choose","x_choose_modeless","x_dialog","x_mdialog","xarc","xarcs","xarrows","xchange","xchoicesi","xclick","xcos","xcosAddToolsMenu","xcosConfigureXmlFile","xcosDiagramToScilab","xcosPalCategoryAdd","xcosPalDelete","xcosPalDisable","xcosPalEnable","xcosPalGenerateIcon","xcosPalLoad","xcosPalMove","xcosUpdateBlock","xdel","xfarc","xfarcs","xfpoly","xfpolys","xfrect","xget","xgetech","xgetmouse","xgraduate","xgrid","xlfont","xls_open","xls_read","xmlAddNs","xmlAsNumber","xmlAsText","xmlDTD","xmlDelete","xmlDocument","xmlDump","xmlElement","xmlFormat","xmlGetNsByHref","xmlGetNsByPrefix","xmlGetOpenDocs","xmlIsValidObject","xmlNs","xmlRead","xmlReadStr","xmlRelaxNG","xmlRemove","xmlSchema","xmlSetAttributes","xmlValidate","xmlWrite","xmlXPath","xname","xpause","xpoly","xpolys","xrect","xrects","xs2bmp","xs2eps","xs2gif","xs2jpg","xs2pdf","xs2png","xs2ppm","xs2ps","xs2svg","xsegs","xset","xsetech","xstring","xstringb","xtitle","zeros","znaupd","zneupd","zoom_rect"]
commands_kw = ["abort","apropos","break","case","catch","clc","clear","continue","do","else","elseif","end","endfunction","exit","for","function","help","if","pause","pwd","quit","resume","return","select","then","try","what","while","who"]
macros_kw = ["%0_i_st","%3d_i_h","%Block_xcosUpdateBlock","%TNELDER_p","%TNELDER_string","%TNMPLOT_p","%TNMPLOT_string","%TOPTIM_p","%TOPTIM_string","%TSIMPLEX_p","%TSIMPLEX_string","%_gsort","%_strsplit","%ar_p","%asn","%b_a_b","%b_a_s","%b_c_s","%b_c_spb","%b_cumprod","%b_cumsum","%b_d_s","%b_diag","%b_e","%b_f_s","%b_f_spb","%b_g_s","%b_g_spb","%b_h_s","%b_h_spb","%b_i_b","%b_i_ce","%b_i_h","%b_i_hm","%b_i_s","%b_i_sp","%b_i_spb","%b_i_st","%b_iconvert","%b_l_b","%b_l_s","%b_m_b","%b_m_s","%b_matrix","%b_n_hm","%b_o_hm","%b_p_s","%b_prod","%b_r_b","%b_r_s","%b_s_b","%b_s_s","%b_string","%b_sum","%b_tril","%b_triu","%b_x_b","%b_x_s","%c_a_c","%c_b_c","%c_b_s","%c_diag","%c_e","%c_eye","%c_f_s","%c_i_c","%c_i_ce","%c_i_h","%c_i_hm","%c_i_lss","%c_i_r","%c_i_s","%c_i_st","%c_matrix","%c_n_l","%c_n_st","%c_o_l","%c_o_st","%c_ones","%c_rand","%c_tril","%c_triu","%cblock_c_cblock","%cblock_c_s","%cblock_e","%cblock_f_cblock","%cblock_p","%cblock_size","%ce_6","%ce_c_ce","%ce_e","%ce_f_ce","%ce_i_ce","%ce_i_s","%ce_i_st","%ce_matrix","%ce_p","%ce_size","%ce_string","%ce_t","%champdat_i_h","%choose","%diagram_xcos","%dir_p","%fptr_i_st","%grayplot_i_h","%h_i_st","%hm_1_hm","%hm_1_s","%hm_2_hm","%hm_2_s","%hm_3_hm","%hm_3_s","%hm_4_hm","%hm_4_s","%hm_5","%hm_a_hm","%hm_a_r","%hm_a_s","%hm_abs","%hm_and","%hm_bool2s","%hm_c_hm","%hm_ceil","%hm_conj","%hm_cos","%hm_cumprod","%hm_cumsum","%hm_d_hm","%hm_d_s","%hm_degree","%hm_e","%hm_exp","%hm_f_hm","%hm_fft","%hm_find","%hm_floor","%hm_g_hm","%hm_h_hm","%hm_i_b","%hm_i_ce","%hm_i_hm","%hm_i_i","%hm_i_p","%hm_i_r","%hm_i_s","%hm_i_st","%hm_iconvert","%hm_imag","%hm_int","%hm_isnan","%hm_isreal","%hm_j_hm","%hm_j_s","%hm_k_hm","%hm_k_s","%hm_log","%hm_m_p","%hm_m_r","%hm_m_s","%hm_matrix","%hm_maxi","%hm_mean","%hm_median","%hm_mini","%hm_n_b","%hm_n_c","%hm_n_hm","%hm_n_i","%hm_n_p","%hm_n_s","%hm_o_b","%hm_o_c","%hm_o_hm","%hm_o_i","%hm_o_p","%hm_o_s","%hm_ones","%hm_or","%hm_p","%hm_prod","%hm_q_hm","%hm_r_s","%hm_rand","%hm_real","%hm_round","%hm_s","%hm_s_hm","%hm_s_r","%hm_s_s","%hm_sign","%hm_sin","%hm_size","%hm_sqrt","%hm_st_deviation","%hm_string","%hm_sum","%hm_x_hm","%hm_x_p","%hm_x_s","%hm_zeros","%i_1_s","%i_2_s","%i_3_s","%i_4_s","%i_Matplot","%i_a_i","%i_a_s","%i_and","%i_ascii","%i_b_s","%i_bezout","%i_champ","%i_champ1","%i_contour","%i_contour2d","%i_d_i","%i_d_s","%i_e","%i_fft","%i_g_i","%i_gcd","%i_h_i","%i_i_ce","%i_i_h","%i_i_hm","%i_i_i","%i_i_s","%i_i_st","%i_j_i","%i_j_s","%i_l_s","%i_lcm","%i_length","%i_m_i","%i_m_s","%i_mfprintf","%i_mprintf","%i_msprintf","%i_n_s","%i_o_s","%i_or","%i_p_i","%i_p_s","%i_plot2d","%i_plot2d1","%i_plot2d2","%i_q_s","%i_r_i","%i_r_s","%i_round","%i_s_i","%i_s_s","%i_sign","%i_string","%i_x_i","%i_x_s","%ip_a_s","%ip_i_st","%ip_m_s","%ip_n_ip","%ip_o_ip","%ip_p","%ip_s_s","%ip_string","%k","%l_i_h","%l_i_s","%l_i_st","%l_isequal","%l_n_c","%l_n_l","%l_n_m","%l_n_p","%l_n_s","%l_n_st","%l_o_c","%l_o_l","%l_o_m","%l_o_p","%l_o_s","%l_o_st","%lss_a_lss","%lss_a_p","%lss_a_r","%lss_a_s","%lss_c_lss","%lss_c_p","%lss_c_r","%lss_c_s","%lss_e","%lss_eye","%lss_f_lss","%lss_f_p","%lss_f_r","%lss_f_s","%lss_i_ce","%lss_i_lss","%lss_i_p","%lss_i_r","%lss_i_s","%lss_i_st","%lss_inv","%lss_l_lss","%lss_l_p","%lss_l_r","%lss_l_s","%lss_m_lss","%lss_m_p","%lss_m_r","%lss_m_s","%lss_n_lss","%lss_n_p","%lss_n_r","%lss_n_s","%lss_norm","%lss_o_lss","%lss_o_p","%lss_o_r","%lss_o_s","%lss_ones","%lss_r_lss","%lss_r_p","%lss_r_r","%lss_r_s","%lss_rand","%lss_s","%lss_s_lss","%lss_s_p","%lss_s_r","%lss_s_s","%lss
_size","%lss_t","%lss_v_lss","%lss_v_p","%lss_v_r","%lss_v_s","%lt_i_s","%m_n_l","%m_o_l","%mc_i_h","%mc_i_s","%mc_i_st","%mc_n_st","%mc_o_st","%mc_string","%mps_p","%mps_string","%msp_a_s","%msp_abs","%msp_e","%msp_find","%msp_i_s","%msp_i_st","%msp_length","%msp_m_s","%msp_maxi","%msp_n_msp","%msp_nnz","%msp_o_msp","%msp_p","%msp_sparse","%msp_spones","%msp_t","%p_a_lss","%p_a_r","%p_c_lss","%p_c_r","%p_cumprod","%p_cumsum","%p_d_p","%p_d_r","%p_d_s","%p_det","%p_e","%p_f_lss","%p_f_r","%p_i_ce","%p_i_h","%p_i_hm","%p_i_lss","%p_i_p","%p_i_r","%p_i_s","%p_i_st","%p_inv","%p_j_s","%p_k_p","%p_k_r","%p_k_s","%p_l_lss","%p_l_p","%p_l_r","%p_l_s","%p_m_hm","%p_m_lss","%p_m_r","%p_matrix","%p_n_l","%p_n_lss","%p_n_r","%p_o_l","%p_o_lss","%p_o_r","%p_o_sp","%p_p_s","%p_prod","%p_q_p","%p_q_r","%p_q_s","%p_r_lss","%p_r_p","%p_r_r","%p_r_s","%p_s_lss","%p_s_r","%p_simp","%p_string","%p_sum","%p_v_lss","%p_v_p","%p_v_r","%p_v_s","%p_x_hm","%p_x_r","%p_y_p","%p_y_r","%p_y_s","%p_z_p","%p_z_r","%p_z_s","%r_a_hm","%r_a_lss","%r_a_p","%r_a_r","%r_a_s","%r_c_lss","%r_c_p","%r_c_r","%r_c_s","%r_clean","%r_cumprod","%r_d_p","%r_d_r","%r_d_s","%r_det","%r_diag","%r_e","%r_eye","%r_f_lss","%r_f_p","%r_f_r","%r_f_s","%r_i_ce","%r_i_hm","%r_i_lss","%r_i_p","%r_i_r","%r_i_s","%r_i_st","%r_inv","%r_j_s","%r_k_p","%r_k_r","%r_k_s","%r_l_lss","%r_l_p","%r_l_r","%r_l_s","%r_m_hm","%r_m_lss","%r_m_p","%r_m_r","%r_m_s","%r_matrix","%r_n_lss","%r_n_p","%r_n_r","%r_n_s","%r_norm","%r_o_lss","%r_o_p","%r_o_r","%r_o_s","%r_ones","%r_p","%r_p_s","%r_prod","%r_q_p","%r_q_r","%r_q_s","%r_r_lss","%r_r_p","%r_r_r","%r_r_s","%r_rand","%r_s","%r_s_hm","%r_s_lss","%r_s_p","%r_s_r","%r_s_s","%r_simp","%r_size","%r_string","%r_sum","%r_t","%r_tril","%r_triu","%r_v_lss","%r_v_p","%r_v_r","%r_v_s","%r_x_p","%r_x_r","%r_x_s","%r_y_p","%r_y_r","%r_y_s","%r_z_p","%r_z_r","%r_z_s","%s_1_hm","%s_1_i","%s_2_hm","%s_2_i","%s_3_hm","%s_3_i","%s_4_hm","%s_4_i","%s_5","%s_a_b","%s_a_hm","%s_a_i","%s_a_ip","%s_a_lss","%s_a_msp","%s_a_r","%s_a_sp","%s_and","%s_b_i","%s_b_s","%s_c_b","%s_c_cblock","%s_c_lss","%s_c_r","%s_c_sp","%s_d_b","%s_d_i","%s_d_p","%s_d_r","%s_d_sp","%s_e","%s_f_b","%s_f_cblock","%s_f_lss","%s_f_r","%s_f_sp","%s_g_b","%s_g_s","%s_h_b","%s_h_s","%s_i_b","%s_i_c","%s_i_ce","%s_i_h","%s_i_hm","%s_i_i","%s_i_lss","%s_i_p","%s_i_r","%s_i_s","%s_i_sp","%s_i_spb","%s_i_st","%s_j_i","%s_k_hm","%s_k_p","%s_k_r","%s_k_sp","%s_l_b","%s_l_hm","%s_l_i","%s_l_lss","%s_l_p","%s_l_r","%s_l_s","%s_l_sp","%s_m_b","%s_m_hm","%s_m_i","%s_m_ip","%s_m_lss","%s_m_msp","%s_m_r","%s_matrix","%s_n_hm","%s_n_i","%s_n_l","%s_n_lss","%s_n_r","%s_n_st","%s_o_hm","%s_o_i","%s_o_l","%s_o_lss","%s_o_r","%s_o_st","%s_or","%s_p_b","%s_p_i","%s_pow","%s_q_hm","%s_q_i","%s_q_p","%s_q_r","%s_q_sp","%s_r_b","%s_r_i","%s_r_lss","%s_r_p","%s_r_r","%s_r_s","%s_r_sp","%s_s_b","%s_s_hm","%s_s_i","%s_s_ip","%s_s_lss","%s_s_r","%s_s_sp","%s_simp","%s_v_lss","%s_v_p","%s_v_r","%s_v_s","%s_x_b","%s_x_hm","%s_x_i","%s_x_r","%s_y_p","%s_y_r","%s_y_sp","%s_z_p","%s_z_r","%s_z_sp","%sn","%sp_a_s","%sp_a_sp","%sp_and","%sp_c_s","%sp_ceil","%sp_cos","%sp_cumprod","%sp_cumsum","%sp_d_s","%sp_d_sp","%sp_diag","%sp_e","%sp_exp","%sp_f_s","%sp_floor","%sp_gsort","%sp_i_ce","%sp_i_h","%sp_i_s","%sp_i_sp","%sp_i_st","%sp_int","%sp_inv","%sp_k_s","%sp_k_sp","%sp_l_s","%sp_l_sp","%sp_length","%sp_norm","%sp_or","%sp_p_s","%sp_prod","%sp_q_s","%sp_q_sp","%sp_r_s","%sp_r_sp","%sp_round","%sp_s_s","%sp_s_sp","%sp_sin","%sp_sqrt","%sp_string","%sp_sum","%sp_tril","%sp_triu","%sp_y_s","
%sp_y_sp","%sp_z_s","%sp_z_sp","%spb_and","%spb_c_b","%spb_cumprod","%spb_cumsum","%spb_diag","%spb_e","%spb_f_b","%spb_g_b","%spb_g_spb","%spb_h_b","%spb_h_spb","%spb_i_b","%spb_i_ce","%spb_i_h","%spb_i_st","%spb_or","%spb_prod","%spb_sum","%spb_tril","%spb_triu","%st_6","%st_c_st","%st_e","%st_f_st","%st_i_b","%st_i_c","%st_i_fptr","%st_i_h","%st_i_i","%st_i_ip","%st_i_lss","%st_i_msp","%st_i_p","%st_i_r","%st_i_s","%st_i_sp","%st_i_spb","%st_i_st","%st_matrix","%st_n_c","%st_n_l","%st_n_mc","%st_n_p","%st_n_s","%st_o_c","%st_o_l","%st_o_mc","%st_o_p","%st_o_s","%st_o_tl","%st_p","%st_size","%st_string","%st_t","%ticks_i_h","%xls_e","%xls_p","%xlssheet_e","%xlssheet_p","%xlssheet_size","%xlssheet_string","DominationRank","G_make","IsAScalar","NDcost","OS_Version","PlotSparse","ReadHBSparse","ReadmiMatrix","TCL_CreateSlave","WritemiMatrix","abcd","abinv","accept_func_default","accept_func_vfsa","acf","acosd","acosh","acoshm","acosm","acot","acotd","acoth","acsc","acscd","acsch","add_demo","add_help_chapter","add_module_help_chapter","add_param","add_profiling","adj2sp","aff2ab","ana_style","analpf","analyze","aplat","apropos","arhnk","arl2","arma2p","armac","armax","armax1","arobasestring2strings","arsimul","ascii2string","asciimat","asec","asecd","asech","asind","asinh","asinhm","asinm","assert_checkalmostequal","assert_checkequal","assert_checkerror","assert_checkfalse","assert_checkfilesequal","assert_checktrue","assert_comparecomplex","assert_computedigits","assert_cond2reltol","assert_cond2reqdigits","assert_generror","atand","atanh","atanhm","atanm","atomsAutoload","atomsAutoloadAdd","atomsAutoloadDel","atomsAutoloadList","atomsCategoryList","atomsCheckModule","atomsDepTreeShow","atomsGetConfig","atomsGetInstalled","atomsGetLoaded","atomsGetLoadedPath","atomsInstall","atomsIsInstalled","atomsIsLoaded","atomsList","atomsLoad","atomsRemove","atomsRepositoryAdd","atomsRepositoryDel","atomsRepositoryList","atomsRestoreConfig","atomsSaveConfig","atomsSearch","atomsSetConfig","atomsShow","atomsSystemInit","atomsSystemUpdate","atomsTest","atomsUpdate","atomsVersion","augment","auread","auwrite","balreal","bench_run","bilin","bilt","bin2dec","binomial","bitand","bitcmp","bitget","bitor","bitset","bitxor","black","blanks","bloc2exp","bloc2ss","block_parameter_error","bode","bstap","buttmag","bvodeS","bytecode","bytecodewalk","cainv","calendar","calfrq","canon","casc","cat","cat_code","cb_m2sci_gui","ccontrg","cell","cell2mat","cellstr","center","cepstrum","cfspec","char","chart","cheb1mag","cheb2mag","check_gateways","check_help","check_modules_xml","check_versions","chepol","chfact","chsolve","classmarkov","clean_help","clock","cls2dls","cmb_lin","cmndred","cmoment","coding_ga_binary","coding_ga_identity","coff","coffg","colcomp","colcompr","colinout","colregul","companion","complex","compute_initial_temp","cond","cond2sp","condestsp","config","configure_msifort","configure_msvc","cont_frm","cont_mat","contrss","conv","convert_to_float","convertindex","convol","convol2d","copfac","correl","cosd","cosh","coshm","cosm","cotd","cotg","coth","cothm","covar","createfun","createstruct","crossover_ga_binary","crossover_ga_default","csc","cscd","csch","csgn","csim","cspect","ctr_gram","czt","dae","daeoptions","damp","datafit","date","datenum","datevec","dbphi","dcf","ddp","dec2bin","dec2hex","dec2oct","del_help_chapter","del_module_help_chapter","demo_begin","demo_choose","demo_compiler","demo_end","demo_file_choice","demo_folder_choice","demo_function_choice","demo_gui","demo_mdialog","demo_message
","demo_run","demo_viewCode","denom","derivat","derivative","des2ss","des2tf","detectmsifort64tools","detectmsvc64tools","determ","detr","detrend","devtools_run_builder","dft","dhnorm","diff","diophant","dir","dirname","dispfiles","dllinfo","dscr","dsimul","dt_ility","dtsi","edit","edit_error","eigenmarkov","ell1mag","enlarge_shape","entropy","eomday","epred","eqfir","eqiir","equil","equil1","erf","erfc","erfcx","erfinv","etime","eval","evans","evstr","expression2code","extract_help_examples","factor","factorial","factors","faurre","ffilt","fft2","fftshift","fieldnames","filt_sinc","filter","findABCD","findAC","findBDK","findR","find_freq","find_links","find_scicos_version","findm","findmsifortcompiler","findmsvccompiler","findx0BD","firstnonsingleton","fit_dat","fix","fixedpointgcd","flipdim","flts","fminsearch","format_txt","fourplan","fprintf","frep2tf","freson","frfit","frmag","fscanf","fseek_origin","fsfirlin","fspec","fspecg","fstabst","ftest","ftuneq","fullfile","fullrf","fullrfk","fun2string","g_margin","gainplot","gamitg","gcare","gcd","gencompilationflags_unix","generateBlockImage","generateBlockImages","generic_i_ce","generic_i_h","generic_i_hm","generic_i_s","generic_i_st","genlib","genlib_old","genmarkov","geomean","getDiagramVersion","getModelicaPath","get_file_path","get_function_path","get_param","get_profile","get_scicos_version","getd","getscilabkeywords","getshell","gettklib","gfare","gfrancis","givens","glever","gmres","group","gschur","gspec","gtild","h2norm","h_cl","h_inf","h_inf_st","h_norm","hallchart","halt","hank","hankelsv","harmean","haveacompiler","head_comments","help","help_from_sci","help_skeleton","hermit","hex2dec","hilb","hilbert","horner","householder","hrmt","htrianr","hypermat","ifft","iir","iirgroup","iirlp","iirmod","ilib_build","ilib_compile","ilib_for_link","ilib_gen_Make","ilib_gen_Make_unix","ilib_gen_cleaner","ilib_gen_gateway","ilib_gen_loader","ilib_include_flag","ilib_mex_build","im_inv","importScicosDiagram","importScicosPal","importXcosDiagram","imrep2ss","ind2sub","inistate","init_ga_default","init_param","initial_scicos_tables","input","instruction2code","intc","intdec","integrate","interp1","interpln","intersect","intl","intsplin","inttrap","inv_coeff","invr","invrs","invsyslin","iqr","isLeapYear","is_absolute_path","is_param","iscell","iscellstr","isempty","isfield","isinf","isnan","isnum","issparse","isstruct","isvector","jmat","justify","kalm","karmarkar","kernel","kpure","krac2","kroneck","lattn","launchtest","lcf","lcm","lcmdiag","leastsq","leqe","leqr","lev","levin","lex_sort","lft","lin","lin2mu","lincos","lindquist","linf","linfn","linsolve","linspace","list2vec","list_param","listfiles","listfunctions","listvarinfile","lmisolver","lmitool","loadXcosLibs","loadmatfile","loadwave","log10","log2","logm","logspace","lqe","lqg","lqg2stan","lqg_ltr","lqr","ls","lyap","m2sci_gui","m_circle","macglov","macrovar","mad","makecell","manedit","mapsound","markp2ss","matfile2sci","mdelete","mean","meanf","median","mese","meshgrid","mfft","mfile2sci","minreal","minss","mkdir","modulo","moment","mrfit","msd","mstr2sci","mtlb","mtlb_0","mtlb_a","mtlb_all","mtlb_any","mtlb_axes","mtlb_axis","mtlb_beta","mtlb_box","mtlb_choices","mtlb_close","mtlb_colordef","mtlb_cond","mtlb_conv","mtlb_cov","mtlb_cumprod","mtlb_cumsum","mtlb_dec2hex","mtlb_delete","mtlb_diag","mtlb_diff","mtlb_dir","mtlb_double","mtlb_e","mtlb_echo","mtlb_error","mtlb_eval","mtlb_exist","mtlb_eye","mtlb_false","mtlb_fft","mtlb_fftshift","mtlb_filter","mtlb_find","mtlb_findstr","mt
lb_fliplr","mtlb_fopen","mtlb_format","mtlb_fprintf","mtlb_fread","mtlb_fscanf","mtlb_full","mtlb_fwrite","mtlb_get","mtlb_grid","mtlb_hold","mtlb_i","mtlb_ifft","mtlb_image","mtlb_imp","mtlb_int16","mtlb_int32","mtlb_int8","mtlb_is","mtlb_isa","mtlb_isfield","mtlb_isletter","mtlb_isspace","mtlb_l","mtlb_legendre","mtlb_linspace","mtlb_logic","mtlb_logical","mtlb_loglog","mtlb_lower","mtlb_max","mtlb_mean","mtlb_median","mtlb_mesh","mtlb_meshdom","mtlb_min","mtlb_more","mtlb_num2str","mtlb_ones","mtlb_pcolor","mtlb_plot","mtlb_prod","mtlb_qr","mtlb_qz","mtlb_rand","mtlb_randn","mtlb_rcond","mtlb_realmax","mtlb_realmin","mtlb_repmat","mtlb_s","mtlb_semilogx","mtlb_semilogy","mtlb_setstr","mtlb_size","mtlb_sort","mtlb_sortrows","mtlb_sprintf","mtlb_sscanf","mtlb_std","mtlb_strcmp","mtlb_strcmpi","mtlb_strfind","mtlb_strrep","mtlb_subplot","mtlb_sum","mtlb_t","mtlb_toeplitz","mtlb_tril","mtlb_triu","mtlb_true","mtlb_type","mtlb_uint16","mtlb_uint32","mtlb_uint8","mtlb_upper","mtlb_var","mtlb_zeros","mu2lin","mutation_ga_binary","mutation_ga_default","mvcorrel","mvvacov","nancumsum","nand2mean","nanmax","nanmean","nanmeanf","nanmedian","nanmin","nanstdev","nansum","narsimul","ndgrid","ndims","nehari","neigh_func_csa","neigh_func_default","neigh_func_fsa","neigh_func_vfsa","neldermead_cget","neldermead_configure","neldermead_costf","neldermead_defaultoutput","neldermead_destroy","neldermead_display","neldermead_function","neldermead_get","neldermead_log","neldermead_new","neldermead_restart","neldermead_search","neldermead_updatesimp","nextpow2","nfreq","nicholschart","nlev","nmplot_cget","nmplot_configure","nmplot_contour","nmplot_destroy","nmplot_display","nmplot_function","nmplot_get","nmplot_historyplot","nmplot_log","nmplot_new","nmplot_outputcmd","nmplot_restart","nmplot_search","nmplot_simplexhistory","noisegen","nonreg_test_run","norm","now","null","num2cell","numdiff","numer","nyquist","nyquistfrequencybounds","obs_gram","obscont","observer","obsv_mat","obsvss","oct2dec","odeoptions","optim_ga","optim_moga","optim_nsga","optim_nsga2","optim_sa","optimbase_cget","optimbase_checkbounds","optimbase_checkcostfun","optimbase_checkx0","optimbase_configure","optimbase_destroy","optimbase_display","optimbase_function","optimbase_get","optimbase_hasbounds","optimbase_hasconstraints","optimbase_hasnlcons","optimbase_histget","optimbase_histset","optimbase_incriter","optimbase_isfeasible","optimbase_isinbounds","optimbase_isinnonlincons","optimbase_log","optimbase_logshutdown","optimbase_logstartup","optimbase_new","optimbase_outputcmd","optimbase_outstruct","optimbase_proj2bnds","optimbase_set","optimbase_stoplog","optimbase_terminate","optimget","optimplotfunccount","optimplotfval","optimplotx","optimset","optimsimplex_center","optimsimplex_check","optimsimplex_compsomefv","optimsimplex_computefv","optimsimplex_deltafv","optimsimplex_deltafvmax","optimsimplex_destroy","optimsimplex_dirmat","optimsimplex_fvmean","optimsimplex_fvstdev","optimsimplex_fvvariance","optimsimplex_getall","optimsimplex_getallfv","optimsimplex_getallx","optimsimplex_getfv","optimsimplex_getn","optimsimplex_getnbve","optimsimplex_getve","optimsimplex_getx","optimsimplex_gradientfv","optimsimplex_log","optimsimplex_new","optimsimplex_print","optimsimplex_reflect","optimsimplex_setall","optimsimplex_setallfv","optimsimplex_setallx","optimsimplex_setfv","optimsimplex_setn","optimsimplex_setnbve","optimsimplex_setve","optimsimplex_setx","optimsimplex_shrink","optimsimplex_size","optimsimplex_sort","optimsimplex_tostring","opt
imsimplex_xbar","orth","p_margin","pack","pareto_filter","parrot","pbig","pca","pcg","pdiv","pen2ea","pencan","pencost","penlaur","perctl","perl","perms","permute","pertrans","pfactors","pfss","phasemag","phaseplot","phc","pinv","playsnd","plotprofile","plzr","pmodulo","pol2des","pol2str","polar","polfact","prbs_a","prettyprint","primes","princomp","profile","proj","projsl","projspec","psmall","pspect","qmr","qpsolve","quart","quaskro","rafiter","randpencil","range","rank","read_csv","readxls","recompilefunction","recons","reglin","regress","remezb","remove_param","remove_profiling","repfreq","replace_Ix_by_Fx","repmat","reset_profiling","resize_matrix","returntoscilab","rhs2code","ric_desc","riccati","rmdir","routh_t","rowcomp","rowcompr","rowinout","rowregul","rowshuff","rref","sample","samplef","samwr","savematfile","savewave","scanf","sci2exp","sciGUI_init","sci_sparse","scicos_getvalue","scicos_simulate","scicos_workspace_init","scisptdemo","scitest","sdiff","sec","secd","sech","selection_ga_elitist","selection_ga_random","sensi","set_param","setdiff","sgrid","show_margins","show_pca","showprofile","signm","sinc","sincd","sind","sinh","sinhm","sinm","sm2des","sm2ss","smga","smooth","solve","sound","soundsec","sp2adj","spaninter","spanplus","spantwo","specfact","speye","sprand","spzeros","sqroot","sqrtm","squarewave","squeeze","srfaur","srkf","ss2des","ss2ss","ss2tf","sscanf","sskf","ssprint","ssrand","st_deviation","st_i_generic","st_ility","stabil","statgain","stdev","stdevf","steadycos","strange","strcmpi","struct","sub2ind","sva","svplot","sylm","sylv","sysconv","sysdiag","sysfact","syslin","syssize","system","systmat","tabul","tand","tanh","tanhm","tanm","tbx_build_blocks","tbx_build_cleaner","tbx_build_gateway","tbx_build_gateway_clean","tbx_build_gateway_loader","tbx_build_help","tbx_build_help_loader","tbx_build_loader","tbx_build_macros","tbx_build_src","tbx_builder","tbx_builder_gateway","tbx_builder_gateway_lang","tbx_builder_help","tbx_builder_help_lang","tbx_builder_macros","tbx_builder_src","tbx_builder_src_lang","temp_law_csa","temp_law_default","temp_law_fsa","temp_law_huang","temp_law_vfsa","test_clean","test_on_columns","test_run","test_run_level","testexamples","tf2des","tf2ss","thrownan","tic","time_id","toc","toeplitz","tokenpos","toolboxes","trace","trans","translatepaths","tree2code","trfmod","trianfml","trimmean","trisolve","trzeros","typeof","ui_observer","union","unique","unit_test_run","unix_g","unix_s","unix_w","unix_x","unobs","unpack","variance","variancef","vec2list","vectorfind","ver","warnobsolete","wavread","wavwrite","wcenter","weekday","wfir","wfir_gui","whereami","who_user","whos","wiener","wigner","winclose","window","winlist","with_javasci","with_macros_source","with_modelica_compiler","with_pvm","with_texmacs","with_tk","write_csv","xcosBlockEval","xcosBlockInterface","xcosCodeGeneration","xcosConfigureModelica","xcosPal","xcosPalAdd","xcosPalAddBlock","xcosPalExport","xcosShowBlockWarning","xcosValidateBlockSet","xcosValidateCompareBlock","xcos_compile","xcos_run","xcos_simulate","xcos_workspace_init","xmltochm","xmltoformat","xmltohtml","xmltojar","xmltopdf","xmltops","xmltoweb","yulewalk","zeropen","zgrid","zpbutt","zpch1","zpch2","zpell"]
builtin_consts = ["\\$","%F","%T","%e","%eps","%f","%fftw","%gui","%i","%inf","%io","%modalWarning","%nan","%pi","%s","%t","%tk","%toolboxes","%toolboxes_dir","%z","PWD","SCI","SCIHOME","TMPDIR","a","ans","assertlib","atomslib","cacsdlib","compatibility_functilib","corelib","data_structureslib","demo_toolslib","development_toolslib","differential_equationlib","dynamic_linklib","elementary_functionslib","fd","fileiolib","functionslib","genetic_algorithmslib","helptoolslib","home","i","integerlib","interpolationlib","iolib","j","linear_algebralib","m2scilib","matiolib","modules_managerlib","myStr","neldermeadlib","optimbaselib","optimizationlib","optimsimplexlib","output_streamlib","overloadinglib","parameterslib","polynomialslib","scicos_autolib","scicos_utilslib","scinoteslib","signal_processinglib","simulated_annealinglib","soundlib","sparselib","special_functionslib","spreadsheetlib","statisticslib","stringlib","tclscilib","timelib","umfpacklib","varType","xcoslib"]
|
mit
|
DreamLiMu/ML_Python
|
les5/logRegres.py
|
1
|
4070
|
#-*-coding:utf-8-*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from numpy import *
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def gradAscent(dataMatIn,classLabels):
dataMatrix = mat(dataMatIn)
labelMat = mat(classLabels).transpose()
m,n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n,1))
for k in range(maxCycles):
        ## matrix multiplication: h is the (m,1) vector of predicted probabilities
h = sigmoid(dataMatrix*weights)
error = (labelMat - h)
weights = weights + alpha * dataMatrix.transpose() * error
return weights
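## The loop above is batch gradient ascent on the logistic log-likelihood:
##     w := w + alpha * X^T (y - sigmoid(X w))
## Below is a minimal self-contained sketch of the same update with plain arrays;
## the toy data and the helper name are made up for illustration only.
def gradAscentSketch():
    X = array([[1.0, 0.5, 1.2], [1.0, -0.3, 0.8], [1.0, 1.1, -0.4]])
    y = array([[1.0], [0.0], [1.0]])
    w = ones((3, 1))
    for k in range(500):
        h = 1.0/(1 + exp(-dot(X, w)))              # predicted probabilities, shape (3,1)
        w = w + 0.001 * dot(X.transpose(), y - h)  # step along the gradient
    return w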
def plotBestFit(wei):
import matplotlib.pyplot as plt
    ##weights = wei.getA() ## use this line when wei comes from gradAscent (a numpy matrix)
    weights = wei ## use this line when wei comes from stocGradAscent0/stocGradAscent1 (a plain array)
dataMat,labelMat = loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n):
if int(labelMat[i]) == 1:
xcord1.append(dataArr[i,1]);ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]);ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
ax.scatter(xcord2,ycord2,s=30,c='green')
x = arange(-3.0,3.0,0.1)
    ## best-fit decision boundary: points where w0 + w1*x1 + w2*x2 = 0
y = (-weights[0] - weights[1] * x) / weights[2]
ax.plot(x, y)
plt.xlabel('X1');plt.ylabel('X2')
plt.show()
def stocGradAscent0(dataMatrix,classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n)
for i in range(m):
        h = sigmoid(sum(dataMatrix[i]*weights))
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
            ## alpha is adjusted on every iteration: it shrinks over time but never reaches 0
            alpha = 4/(1.0+j+i)+0.01
            ## update using a randomly chosen sample to reduce periodic oscillations
randIndex = int(random.uniform(0,len(dataIndex)))
h = sigmoid(sum(dataMatrix[randIndex]*weights))
error = classLabels[randIndex] - h
weights = weights + alpha *error*dataMatrix[randIndex]
del(dataIndex[randIndex])
return weights
def classifyVector(inX, weights):
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
def colicTest():
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
    errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print "the error rate of this test is : %f"%errorRate
return errorRate
def multiTest():
numTests = 10; errorSum = 0.0
for k in range(numTests):
errorSum += colicTest()
print "after %d iterations the average error rate is :%f" % (numTests, errorSum/float(numTests))
if __name__ == '__main__':
dataMat,labelMat = loadDataSet()
#weights = stocGradAscent1(array(dataMat), labelMat, 500)
#plotBestFit(weights)
multiTest()
|
gpl-2.0
|
patrikpettersson/rest-engine
|
lib/werkzeug/contrib/fixers.py
|
87
|
9071
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urllib import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class LighttpdCGIRootFix(object):
"""Wrap the application in this middleware if you are using lighttpd
with FastCGI or CGI and the application is mounted on the URL root.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = ''
return self.app(environ, start_response)
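# A minimal usage sketch (illustrative only; `hello_app` is a made-up application,
# not part of werkzeug). Wrapping is only needed when the app is mounted on the
# URL root under lighttpd with FastCGI/CGI.
def _lighttpd_root_fix_example():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from the URL root\n']
    # The fixer merges SCRIPT_NAME back into PATH_INFO for affected lighttpd versions.
    return LighttpdCGIRootFix(hello_app)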
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
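# A minimal usage sketch (illustrative only; the environ values below are made up).
# It shows how PATH_INFO is rebuilt from REQUEST_URI minus SCRIPT_NAME and the
# query string on servers that provide REQUEST_URI.
def _path_info_from_request_uri_example():
    def echo_path(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['PATH_INFO']]  # expected to be '/reports archive'
    environ = {'SCRIPT_NAME': '/app', 'REQUEST_URI': '/app/reports%20archive?year=2009'}
    return PathInfoFromRequestUriFix(echo_path)(environ, lambda *args: None)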
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default the first one is picked.
.. versionadded:: 0.8
"""
if forwarded_for:
return forwarded_for[0]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
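# A minimal usage sketch (illustrative only; `my_wsgi_app` is a made-up application).
# It also shows how get_remote_addr can be overridden, e.g. to trust the last
# X-Forwarded-For entry instead of the first one.
def _proxy_fix_example(my_wsgi_app):
    class LastEntryProxyFix(ProxyFix):
        def get_remote_addr(self, forwarded_for):
            if forwarded_for:
                return forwarded_for[-1]
    return LastEntryProxyFix(my_wsgi_app)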
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
    is useful, for example, to remove the `Date` header from responses if you
    are using a server that adds that header whether or not it is already
    present, or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
    - removal of `Vary` headers for unsupported mimetypes, which
      causes trouble with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and are available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
self.fix_headers(environ, Headers.linked(headers), status)
return start_response(status, headers, exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
|
mit
|
sobercoder/gem5
|
util/style/verifiers.py
|
10
|
16542
|
#!/usr/bin/env python2
#
# Copyright (c) 2014, 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2007,2011 The Hewlett-Packard Development Company
# Copyright (c) 2016 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
# Andreas Sandberg
from abc import ABCMeta, abstractmethod
from difflib import SequenceMatcher
import inspect
import os
import re
import sys
import style
import sort_includes
from region import *
from file_types import lang_type
def safefix(fix_func):
""" Decorator for the fix functions of the Verifier class.
This function wraps the fix function and creates a backup file
just in case there is an error.
"""
def safefix_wrapper(*args, **kwargs):
# Check to be sure that this is decorating a function we expect:
# a class method with filename as the first argument (after self)
assert(os.path.exists(args[1]))
self = args[0]
assert(is_verifier(self.__class__))
filename = args[1]
# Now, Let's make a backup file.
from shutil import copyfile
backup_name = filename+'.bak'
copyfile(filename, backup_name)
# Try to apply the fix. If it fails, then we revert the file
# Either way, we need to clean up our backup file
try:
fix_func(*args, **kwargs)
except Exception as e:
# Restore the original file to the backup file
self.ui.write("Error! Restoring the original file.\n")
copyfile(backup_name, filename)
raise
finally:
# Clean up the backup file
os.remove(backup_name)
return safefix_wrapper
def _modified_regions(old, new):
try:
m = SequenceMatcher(a=old, b=new, autojunk=False)
except TypeError:
# autojunk was introduced in Python 2.7. We need a fallback
# mechanism to support old Python versions.
m = SequenceMatcher(a=old, b=new)
regions = Regions()
for tag, i1, i2, j1, j2 in m.get_opcodes():
if tag != "equal":
regions.extend(Region(i1, i2))
return regions
class Verifier(object):
"""Base class for style verifiers
Verifiers check for style violations and optionally fix such
violations. Implementations should either inherit from this class
(Verifier) if they need to work on entire files or LineVerifier if
they operate on a line-by-line basis.
Subclasses must define these class attributes:
languages = set of strings identifying applicable languages
test_name = long descriptive name of test, will be used in
messages such as "error in <foo>" or "invalid <foo>"
opt_name = short name used to generate command-line options to
control the test (--fix-<foo>, --ignore-<foo>, etc.)
"""
__metaclass__ = ABCMeta
def __init__(self, ui, opts, base=None):
self.ui = ui
self.base = base
# opt_name must be defined as a class attribute of derived classes.
# Check test-specific opts first as these have precedence.
self.opt_fix = opts.get('fix_' + self.opt_name, False)
self.opt_ignore = opts.get('ignore_' + self.opt_name, False)
self.opt_skip = opts.get('skip_' + self.opt_name, False)
# If no test-specific opts were set, then set based on "-all" opts.
if not (self.opt_fix or self.opt_ignore or self.opt_skip):
self.opt_fix = opts.get('fix_all', False)
self.opt_ignore = opts.get('ignore_all', False)
self.opt_skip = opts.get('skip_all', False)
def normalize_filename(self, name):
abs_name = os.path.abspath(name)
if self.base is None:
return abs_name
abs_base = os.path.abspath(self.base)
return os.path.relpath(abs_name, start=abs_base)
def open(self, filename, mode):
try:
f = file(filename, mode)
except OSError, msg:
print 'could not open file %s: %s' % (filename, msg)
return None
return f
def skip(self, filename):
# We never want to handle symlinks, so always skip them: If the
# location pointed to is a directory, skip it. If the location is a
# file inside the gem5 directory, it will be checked as a file, so
# symlink can be skipped. If the location is a file outside gem5, we
# don't want to check it anyway.
if os.path.islink(filename):
return True
return lang_type(filename) not in self.languages
def apply(self, filename, regions=all_regions):
"""Possibly apply to specified regions of file 'filename'.
Verifier is skipped if --skip-<test> option was provided or if
file is not of an applicable type. Otherwise file is checked
and error messages printed. Errors are fixed or ignored if
the corresponding --fix-<test> or --ignore-<test> options were
provided. If neither, the user is prompted for an action.
Returns True to abort, False otherwise.
"""
if not (self.opt_skip or self.skip(filename)):
errors = self.check(filename, regions)
if errors and not self.opt_ignore:
if self.opt_fix:
self.fix(filename, regions)
else:
result = self.ui.prompt("(a)bort, (i)gnore, or (f)ix?",
'aif', 'a')
if result == 'f':
self.fix(filename, regions)
elif result == 'a':
return True # abort
return False
@abstractmethod
def check(self, filename, regions=all_regions, fobj=None, silent=False):
"""Check specified regions of file 'filename'.
Given that it is possible that the current contents of the file
differ from the file as 'staged to commit', for those cases, and
maybe others, the argument fobj should be a file object open and reset
with the contents matching what the file would look like after the
        commit. This is needed to keep the messages using 'filename' meaningful.
        The argument silent is useful to prevent output when we run check on
        the staged file vs the actual file, to detect if the user forgot to
        stage fixes for the commit. This way, we prevent reporting errors
twice in stderr.
Line-by-line checks can simply provide a check_line() method
that returns True if the line is OK and False if it has an
error. Verifiers that need a multi-line view (like
SortedIncludes) must override this entire function.
Returns a count of errors (0 if none), though actual non-zero
count value is not currently used anywhere.
"""
pass
@abstractmethod
def fix(self, filename, regions=all_regions):
"""Fix specified regions of file 'filename'.
Line-by-line fixes can simply provide a fix_line() method that
returns the fixed line. Verifiers that need a multi-line view
(like SortedIncludes) must override this entire function.
"""
pass
class LineVerifier(Verifier):
def check(self, filename, regions=all_regions, fobj=None, silent=False):
close = False
if fobj is None:
fobj = self.open(filename, 'r')
close = True
lang = lang_type(filename)
assert lang in self.languages
errors = 0
for num,line in enumerate(fobj):
if num not in regions:
continue
line = line.rstrip('\n')
if not self.check_line(line, language=lang):
if not silent:
self.ui.write("invalid %s in %s:%d\n" % \
(self.test_name, filename, num + 1))
if self.ui.verbose:
self.ui.write(">>%s<<\n" % line[:-1])
errors += 1
if close:
fobj.close()
return errors
@safefix
def fix(self, filename, regions=all_regions):
f = self.open(filename, 'r+')
lang = lang_type(filename)
assert lang in self.languages
lines = list(f)
f.seek(0)
f.truncate()
for i,line in enumerate(lines):
line = line.rstrip('\n')
if i in regions:
line = self.fix_line(line, language=lang)
f.write(line)
f.write("\n")
f.close()
self.current_language = None
@abstractmethod
def check_line(self, line, **kwargs):
pass
@abstractmethod
def fix_line(self, line, **kwargs):
pass
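# A minimal sketch of a new line-based check following the contract described in
# the Verifier docstring (illustrative only, not a real gem5 verifier; it is
# wrapped in a function so it is not collected into all_verifiers below).
def _example_line_verifier():
    class TrailingSemicolon(LineVerifier):
        """Example only: flag python lines that end with a stray semicolon."""
        languages = set(('python',))
        test_name = 'trailing semicolon'
        opt_name = 'semicolon'
        def check_line(self, line, **kwargs):
            return not line.rstrip().endswith(';')
        def fix_line(self, line, **kwargs):
            return line.rstrip().rstrip(';')
    return TrailingSemicolon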
class Whitespace(LineVerifier):
"""Check whitespace.
Specifically:
- No tabs used for indent
- No trailing whitespace
"""
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons',
'make', 'dts'))
trail_only = set(('make', 'dts'))
test_name = 'whitespace'
opt_name = 'white'
_lead = re.compile(r'^([ \t]+)')
_trail = re.compile(r'([ \t]+)$')
def skip_lead(self, language):
return language in Whitespace.trail_only
def check_line(self, line, language):
if not self.skip_lead(language):
match = Whitespace._lead.search(line)
if match and match.group(1).find('\t') != -1:
return False
match = Whitespace._trail.search(line)
if match:
return False
return True
def fix_line(self, line, language):
if not self.skip_lead(language) and Whitespace._lead.search(line):
newline = ''
for i,c in enumerate(line):
if c == ' ':
newline += ' '
elif c == '\t':
newline += ' ' * (style.tabsize - \
len(newline) % style.tabsize)
else:
newline += line[i:]
break
line = newline
return line.rstrip()
class SortedIncludes(Verifier):
"""Check for proper sorting of include statements"""
languages = sort_includes.default_languages
test_name = 'include file order'
opt_name = 'include'
def __init__(self, *args, **kwargs):
super(SortedIncludes, self).__init__(*args, **kwargs)
self.sort_includes = sort_includes.SortIncludes()
def check(self, filename, regions=all_regions, fobj=None, silent=False):
close = False
if fobj is None:
fobj = self.open(filename, 'r')
close = True
norm_fname = self.normalize_filename(filename)
old = [ l.rstrip('\n') for l in fobj.xreadlines() ]
if close:
fobj.close()
if len(old) == 0:
return 0
language = lang_type(filename, old[0])
new = list(self.sort_includes(old, norm_fname, language))
modified = _modified_regions(old, new) & regions
if modified:
if not silent:
self.ui.write("invalid sorting of includes in %s\n"
% (filename))
if self.ui.verbose:
for start, end in modified.regions:
self.ui.write("bad region [%d, %d)\n" % (start, end))
return 1
return 0
@safefix
def fix(self, filename, regions=all_regions):
f = self.open(filename, 'r+')
old = f.readlines()
lines = [ l.rstrip('\n') for l in old ]
language = lang_type(filename, lines[0])
sort_lines = list(self.sort_includes(lines, filename, language))
new = ''.join(line + '\n' for line in sort_lines)
f.seek(0)
f.truncate()
for i,line in enumerate(sort_lines):
f.write(line)
f.write('\n')
f.close()
class ControlSpace(LineVerifier):
"""Check for exactly one space after if/while/for"""
languages = set(('C', 'C++'))
test_name = 'spacing after if/while/for'
opt_name = 'control'
_any_control = re.compile(r'\b(if|while|for)([ \t]*)\(')
def check_line(self, line, **kwargs):
match = ControlSpace._any_control.search(line)
return not (match and match.group(2) != " ")
def fix_line(self, line, **kwargs):
new_line = ControlSpace._any_control.sub(r'\1 (', line)
return new_line
class LineLength(LineVerifier):
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons'))
test_name = 'line length'
opt_name = 'length'
def check_line(self, line, **kwargs):
return style.normalized_len(line) <= 79
def fix(self, filename, regions=all_regions, **kwargs):
self.ui.write("Warning: cannot automatically fix overly long lines.\n")
def fix_line(self, line):
pass
class ControlCharacters(LineVerifier):
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons'))
test_name = 'control character'
opt_name = 'ascii'
valid = ('\n', '\t')
invalid = "".join([chr(i) for i in range(0, 0x20) if chr(i) not in valid])
def check_line(self, line, **kwargs):
return self.fix_line(line) == line
def fix_line(self, line, **kwargs):
return line.translate(None, ControlCharacters.invalid)
class BoolCompare(LineVerifier):
languages = set(('C', 'C++', 'python'))
test_name = 'boolean comparison'
opt_name = 'boolcomp'
regex = re.compile(r'\s*==\s*([Tt]rue|[Ff]alse)\b')
def check_line(self, line, **kwargs):
return self.regex.search(line) == None
def fix_line(self, line, **kwargs):
match = self.regex.search(line)
if match:
if match.group(1) in ('true', 'True'):
line = self.regex.sub('', line)
else:
self.ui.write("Warning: cannot automatically fix "
"comparisons with false/False.\n")
return line
def is_verifier(cls):
"""Determine if a class is a Verifier that can be instantiated"""
return inspect.isclass(cls) and issubclass(cls, Verifier) and \
not inspect.isabstract(cls)
# list of all verifier classes
all_verifiers = [ v for n, v in \
inspect.getmembers(sys.modules[__name__], is_verifier) ]
|
bsd-3-clause
|
glmcdona/meddle
|
examples/base/Lib/email/mime/message.py
|
573
|
1286
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Class representing message/* MIME documents."""
__all__ = ['MIMEMessage']
from email import message
from email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
"""Class representing message/* MIME documents."""
def __init__(self, _msg, _subtype='rfc822'):
"""Create a message/* type MIME document.
_msg is a message object and must be an instance of Message, or a
derived class of Message, otherwise a TypeError is raised.
Optional _subtype defines the subtype of the contained message. The
default is "rfc822" (this is defined by the MIME standard, even though
the term "rfc822" is technically outdated by RFC 2822).
"""
MIMENonMultipart.__init__(self, 'message', _subtype)
if not isinstance(_msg, message.Message):
raise TypeError('Argument is not an instance of Message')
# It's convenient to use this base class method. We need to do it
# this way or we'll get an exception
message.Message.attach(self, _msg)
# And be sure our default type is set correctly
self.set_default_type('message/rfc822')
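def _mime_message_example():
    # A minimal usage sketch (illustrative only, not part of this module's API):
    # attach an existing Message instance to an outer document as a
    # message/rfc822 part.
    from email.mime.text import MIMEText
    inner = MIMEText('carried as an attachment')
    outer = MIMEMessage(inner)  # Content-Type becomes message/rfc822
    outer['Subject'] = 'Forwarded note'
    return outer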
|
mit
|
davits/ycmd
|
ycmd/tests/go/diagnostics_test.py
|
4
|
4392
|
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
has_entries,
has_entry )
from pprint import pformat
import json
from ycmd.tests.go import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
LocationMatcher,
PollForMessages,
PollForMessagesTimeoutException,
RangeMatcher,
WaitForDiagnosticsToBeReady,
WithRetry )
from ycmd.utils import ReadFile
MAIN_FILEPATH = PathToTestFile( 'goto.go' )
DIAG_MATCHERS_PER_FILE = {
MAIN_FILEPATH: contains_inanyorder(
has_entries( {
'kind': 'ERROR',
'text': 'undeclared name: diagnostics_test',
'location': LocationMatcher( MAIN_FILEPATH, 12, 5 ),
'location_extent': RangeMatcher( MAIN_FILEPATH, ( 12, 5 ), ( 12, 21 ) ),
'ranges': contains_exactly( RangeMatcher( MAIN_FILEPATH,
( 12, 5 ),
( 12, 21 ) ) ),
'fixit_available': False
} )
)
}
@WithRetry
@SharedYcmd
def Diagnostics_DetailedDiags_test( app ):
filepath = PathToTestFile( 'goto.go' )
contents = ReadFile( filepath )
WaitForDiagnosticsToBeReady( app, filepath, contents, 'go' )
request_data = BuildRequest( contents = contents,
filepath = filepath,
filetype = 'go',
line_num = 12,
column_num = 5 )
results = app.post_json( '/detailed_diagnostic', request_data ).json
assert_that( results,
has_entry( 'message', 'undeclared name: diagnostics_test' ) )
@WithRetry
@SharedYcmd
def Diagnostics_FileReadyToParse_test( app ):
filepath = PathToTestFile( 'goto.go' )
contents = ReadFile( filepath )
# It can take a while for the diagnostics to be ready.
results = WaitForDiagnosticsToBeReady( app, filepath, contents, 'go' )
print( f'completer response: { pformat( results ) }' )
assert_that( results, DIAG_MATCHERS_PER_FILE[ filepath ] )
@WithRetry
@SharedYcmd
def Diagnostics_Poll_test( app ):
filepath = PathToTestFile( 'goto.go' )
contents = ReadFile( filepath )
# Poll until we receive _all_ the diags asynchronously.
to_see = sorted( DIAG_MATCHERS_PER_FILE.keys() )
seen = {}
try:
for message in PollForMessages( app,
{ 'filepath': filepath,
'contents': contents,
'filetype': 'go' } ):
if 'diagnostics' in message:
if message[ 'filepath' ] not in DIAG_MATCHERS_PER_FILE:
continue
seen[ message[ 'filepath' ] ] = True
assert_that( message, has_entries( {
'diagnostics': DIAG_MATCHERS_PER_FILE[ message[ 'filepath' ] ],
'filepath': message[ 'filepath' ]
} ) )
if sorted( seen.keys() ) == to_see:
break
# Eventually PollForMessages will throw a timeout exception and we'll fail
# if we don't see all of the expected diags.
except PollForMessagesTimeoutException as e:
raise AssertionError(
str( e ) +
'Timed out waiting for full set of diagnostics. '
f'Expected to see diags for { json.dumps( to_see, indent = 2 ) }, '
f'but only saw { json.dumps( sorted( seen.keys() ), indent = 2 ) }.' )
def Dummy_test():
# Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
assert True
|
gpl-3.0
|
ravencoin/raven
|
contrib/testgen/gen_base58_test_vectors.py
|
1064
|
4344
|
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
|
mit
|
mheap/ansible
|
lib/ansible/modules/network/meraki/meraki_organization.py
|
3
|
7828
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_organization
short_description: Manage organizations in the Meraki cloud
version_added: "2.6"
description:
- Allows for creation, management, and visibility into organizations within Meraki.
notes:
- More information about the Meraki API can be found at U(https://dashboard.meraki.com/api_docs).
- Some of the options are likely only used for developers within Meraki.
options:
state:
description:
- Create or modify an organization.
choices: ['present', 'query']
default: present
clone:
description:
- Organization to clone to a new organization.
org_name:
description:
- Name of organization.
- If C(clone) is specified, C(org_name) is the name of the new organization.
aliases: [ name, organization ]
org_id:
description:
- ID of organization.
aliases: [ id ]
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Create a new organization named YourOrg
meraki_organization:
auth_key: abc12345
org_name: YourOrg
state: present
delegate_to: localhost
- name: Query information about all organizations associated to the user
meraki_organization:
auth_key: abc12345
state: query
delegate_to: localhost
- name: Query information about a single organization named YourOrg
meraki_organization:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Rename an organization to RenamedOrg
meraki_organization:
auth_key: abc12345
org_id: 987654321
org_name: RenamedOrg
state: present
delegate_to: localhost
- name: Clone an organization named Org to a new one called ClonedOrg
meraki_organization:
auth_key: abc12345
clone: Org
org_name: ClonedOrg
state: present
delegate_to: localhost
'''
RETURN = r'''
response:
description: Data returned from Meraki dashboard.
type: dict
returned: info
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def get_org(meraki, org_id, data):
# meraki.fail_json(msg=str(org_id), data=data, oid0=data[0]['id'], oid1=data[1]['id'])
for o in data:
# meraki.fail_json(msg='o', data=o['id'], type=str(type(o['id'])))
if o['id'] == org_id:
return o
return -1
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(clone=dict(type='str'),
state=dict(type='str', choices=['present', 'query'], default='present'),
org_name=dict(type='str', aliases=['name', 'organization']),
org_id=dict(type='int', aliases=['id']),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='organizations')
meraki.params['follow_redirects'] = 'all'
create_urls = {'organizations': '/organizations',
}
update_urls = {'organizations': '/organizations/{org_id}',
}
clone_urls = {'organizations': '/organizations/{org_id}/clone',
}
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['clone'] = clone_urls
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# FIXME: Work with Meraki so they can implement a check mode
if module.check_mode:
meraki.exit_json(**meraki.result)
# execute checks for argument completeness
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
orgs = meraki.get_orgs()
if meraki.params['state'] == 'query':
if meraki.params['org_name']: # Query by organization name
module.warn('All matching organizations will be returned, even if there are duplicate named organizations')
for o in orgs:
if o['name'] == meraki.params['org_name']:
meraki.result['data'] = o
elif meraki.params['org_id']:
for o in orgs:
if o['id'] == meraki.params['org_id']:
meraki.result['data'] = o
else: # Query all organizations, no matter what
meraki.result['data'] = orgs
elif meraki.params['state'] == 'present':
if meraki.params['clone']: # Cloning
payload = {'name': meraki.params['org_name']}
meraki.result['data'] = json.loads(
meraki.request(
meraki.construct_path(
'clone',
org_name=meraki.params['clone']
),
payload=json.dumps(payload),
method='POST'))
meraki.result['changed'] = True
elif not meraki.params['org_id'] and meraki.params['org_name']: # Create new organization
payload = {'name': meraki.params['org_name']}
meraki.result['data'] = json.loads(
meraki.request(
meraki.construct_path('create'),
method='POST',
payload=json.dumps(payload)))
meraki.result['changed'] = True
elif meraki.params['org_id'] and meraki.params['org_name']: # Update an existing organization
payload = {'name': meraki.params['org_name'],
'id': meraki.params['org_id'],
}
if meraki.is_update_required(
get_org(
meraki,
meraki.params['org_id'],
orgs),
payload):
meraki.result['data'] = json.loads(
meraki.request(
meraki.construct_path(
'update',
org_id=meraki.params['org_id']
),
method='PUT',
payload=json.dumps(payload)))
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
    # simply call AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
LyonsLab/coge
|
bin/last_wrapper/Bio/SeqUtils/__init__.py
|
3
|
11238
|
#!/usr/bin/env python
# Created: Wed May 29 08:07:18 2002
# [email protected], [email protected]
# Copyright 2001 by Thomas Sicheritz-Ponten and Cecilia Alsmark.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Miscellaneous functions for dealing with sequences."""
import re, time
from Bio import SeqIO
from Bio.Seq import Seq
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData, CodonTable
######################################
# DNA
######################
# {{{
def GC(seq):
"""Calculates G+C content, returns the percentage (float between 0 and 100).
    Copes with mixed case sequences, and with the ambiguous nucleotide S (G or C)
when counting the G and C content. The percentage is calculated against
the full length, e.g.:
>>> from Bio.SeqUtils import GC
>>> GC("ACTGN")
40.0
Note that this will return zero for an empty sequence.
"""
try:
gc = sum(map(seq.count,['G','C','g','c','S','s']))
return gc*100.0/len(seq)
except ZeroDivisionError:
return 0.0
def GC123(seq):
"""Calculates total G+C content plus first, second and third positions.
Returns a tuple of four floats (percentages between 0 and 100) for the
entire sequence, and the three codon positions. e.g.
>>> from Bio.SeqUtils import GC123
>>> GC123("ACTGTN")
(40.0, 50.0, 50.0, 0.0)
Copes with mixed case sequences, but does NOT deal with ambiguous
nucleotides.
"""
d= {}
for nt in ['A','T','G','C']:
d[nt] = [0,0,0]
for i in range(0,len(seq),3):
codon = seq[i:i+3]
if len(codon) <3: codon += ' '
for pos in range(0,3):
for nt in ['A','T','G','C']:
if codon[pos] == nt or codon[pos] == nt.lower():
d[nt][pos] += 1
gc = {}
gcall = 0
nall = 0
for i in range(0,3):
try:
n = d['G'][i] + d['C'][i] +d['T'][i] + d['A'][i]
gc[i] = (d['G'][i] + d['C'][i])*100.0/n
except:
gc[i] = 0
gcall = gcall + d['G'][i] + d['C'][i]
nall = nall + n
gcall = 100.0*gcall/nall
return gcall, gc[0], gc[1], gc[2]
def GC_skew(seq, window = 100):
"""Calculates GC skew (G-C)/(G+C) for multuple windows along the sequence.
Returns a list of ratios (floats), controlled by the length of the sequence
and the size of the window.
Does NOT look at any ambiguous nucleotides.
"""
# 8/19/03: Iddo: added lowercase
values = []
for i in range(0, len(seq), window):
s = seq[i: i + window]
g = s.count('G') + s.count('g')
c = s.count('C') + s.count('c')
skew = (g-c)/float(g+c)
values.append(skew)
return values
from math import pi, sin, cos, log
def xGC_skew(seq, window = 1000, zoom = 100,
r = 300, px = 100, py = 100):
"""Calculates and plots normal and accumulated GC skew (GRAPHICS !!!)."""
from Tkinter import Scrollbar, Canvas, BOTTOM, BOTH, ALL, \
VERTICAL, HORIZONTAL, RIGHT, LEFT, X, Y
yscroll = Scrollbar(orient = VERTICAL)
xscroll = Scrollbar(orient = HORIZONTAL)
canvas = Canvas(yscrollcommand = yscroll.set,
xscrollcommand = xscroll.set, background = 'white')
win = canvas.winfo_toplevel()
win.geometry('700x700')
yscroll.config(command = canvas.yview)
xscroll.config(command = canvas.xview)
yscroll.pack(side = RIGHT, fill = Y)
xscroll.pack(side = BOTTOM, fill = X)
canvas.pack(fill=BOTH, side = LEFT, expand = 1)
canvas.update()
X0, Y0 = r + px, r + py
x1, x2, y1, y2 = X0 - r, X0 + r, Y0 -r, Y0 + r
ty = Y0
canvas.create_text(X0, ty, text = '%s...%s (%d nt)' % (seq[:7], seq[-7:], len(seq)))
ty +=20
canvas.create_text(X0, ty, text = 'GC %3.2f%%' % (GC(seq)))
ty +=20
canvas.create_text(X0, ty, text = 'GC Skew', fill = 'blue')
ty +=20
canvas.create_text(X0, ty, text = 'Accumulated GC Skew', fill = 'magenta')
ty +=20
canvas.create_oval(x1,y1, x2, y2)
acc = 0
start = 0
for gc in GC_skew(seq, window):
r1 = r
acc+=gc
# GC skew
alpha = pi - (2*pi*start)/len(seq)
r2 = r1 - gc*zoom
x1 = X0 + r1 * sin(alpha)
y1 = Y0 + r1 * cos(alpha)
x2 = X0 + r2 * sin(alpha)
y2 = Y0 + r2 * cos(alpha)
canvas.create_line(x1,y1,x2,y2, fill = 'blue')
# accumulated GC skew
r1 = r - 50
r2 = r1 - acc
x1 = X0 + r1 * sin(alpha)
y1 = Y0 + r1 * cos(alpha)
x2 = X0 + r2 * sin(alpha)
y2 = Y0 + r2 * cos(alpha)
canvas.create_line(x1,y1,x2,y2, fill = 'magenta')
canvas.update()
start += window
canvas.configure(scrollregion = canvas.bbox(ALL))
def molecular_weight(seq):
"""Calculate the molecular weight of a DNA sequence."""
if type(seq) == type(''): seq = Seq(seq, IUPAC.unambiguous_dna)
weight_table = IUPACData.unambiguous_dna_weights
return sum(weight_table[x] for x in seq)
def nt_search(seq, subseq):
"""Search for a DNA subseq in sequence.
    Uses ambiguous values (like N = A or T or C or G, R = A or G, etc.),
    and searches only on the forward strand.
"""
pattern = ''
for nt in subseq:
value = IUPACData.ambiguous_dna_values[nt]
if len(value) == 1:
pattern += value
else:
pattern += '[%s]' % value
pos = -1
result = [pattern]
l = len(seq)
while True:
pos+=1
s = seq[pos:]
m = re.search(pattern, s)
if not m: break
pos += int(m.start(0))
result.append(pos)
return result
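# Illustrative example (not from the original module): the ambiguous base N
# expands to [GATC], so searching "ACTGATCG" for "GAN" returns the compiled
# pattern followed by the 0-based start position of each match:
#     >>> nt_search("ACTGATCG", "GAN")
#     ['GA[GATC]', 3]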
# }}}
######################################
# Protein
######################
# {{{
def seq3(seq):
"""Turn a one letter code protein sequence into one with three letter codes.
The single input argument 'seq' should be a protein sequence using single
letter codes, either as a python string or as a Seq or MutableSeq object.
This function returns the amino acid sequence as a string using the three
letter amino acid codes. Output follows the IUPAC standard (including
ambiguous characters B for "Asx", J for "Xle" and X for "Xaa", and also U
for "Sel" and O for "Pyl") plus "Ter" for a terminator given as an asterisk.
Any unknown character (including possible gap characters), is changed into
'Xaa'.
e.g.
>>> from Bio.SeqUtils import seq3
>>> seq3("MAIVMGRWKGAR*")
'MetAlaIleValMetGlyArgTrpLysGlyAlaArgTer'
This function was inspired by BioPerl's seq3.
"""
threecode = {'A':'Ala', 'B':'Asx', 'C':'Cys', 'D':'Asp',
'E':'Glu', 'F':'Phe', 'G':'Gly', 'H':'His',
'I':'Ile', 'K':'Lys', 'L':'Leu', 'M':'Met',
'N':'Asn', 'P':'Pro', 'Q':'Gln', 'R':'Arg',
'S':'Ser', 'T':'Thr', 'V':'Val', 'W':'Trp',
'Y':'Tyr', 'Z':'Glx', 'X':'Xaa', '*':'Ter',
'U':'Sel', 'O':'Pyl', 'J':'Xle',
}
#We use a default of 'Xaa' for undefined letters
#Note this will map '-' to 'Xaa' which may be undesirable!
return ''.join([threecode.get(aa,'Xaa') for aa in seq])
# }}}
######################################
# Mixed ???
######################
# {{{
def six_frame_translations(seq, genetic_code = 1):
"""Formatted string showing the 6 frame translations and GC content.
    Nice-looking 6 frame translation with GC content - code from xbbtools,
    similar to DNA Strider's six-frame translation.
e.g.
from Bio.SeqUtils import six_frame_translations
print six_frame_translations("AUGGCCAUUGUAAUGGGCCGCUGA")
"""
from Bio.Seq import reverse_complement, translate
anti = reverse_complement(seq)
comp = anti[::-1]
length = len(seq)
frames = {}
for i in range(0,3):
frames[i+1] = translate(seq[i:], genetic_code)
        frames[-(i+1)] = translate(anti[i:], genetic_code)[::-1]  # reversed so the minus-strand frames align with the forward sequence
# create header
if length > 20:
short = '%s ... %s' % (seq[:10], seq[-10:])
else:
short = seq
#TODO? Remove the date as this would spoil any unit test...
date = time.strftime('%y %b %d, %X', time.localtime(time.time()))
header = 'GC_Frame: %s, ' % date
for nt in ['a','t','g','c']:
header += '%s:%d ' % (nt, seq.count(nt.upper()))
header += '\nSequence: %s, %d nt, %0.2f %%GC\n\n\n' % (short.lower(),length, GC(seq))
res = header
for i in range(0,length,60):
subseq = seq[i:i+60]
csubseq = comp[i:i+60]
p = i/3
res = res + '%d/%d\n' % (i+1, i/3+1)
res = res + ' ' + ' '.join(map(None,frames[3][p:p+20])) + '\n'
res = res + ' ' + ' '.join(map(None,frames[2][p:p+20])) + '\n'
res = res + ' '.join(map(None,frames[1][p:p+20])) + '\n'
# seq
res = res + subseq.lower() + '%5d %%\n' % int(GC(subseq))
res = res + csubseq.lower() + '\n'
# - frames
res = res + ' '.join(map(None,frames[-2][p:p+20])) +' \n'
res = res + ' ' + ' '.join(map(None,frames[-1][p:p+20])) + '\n'
res = res + ' ' + ' '.join(map(None,frames[-3][p:p+20])) + '\n\n'
return res
# }}}
######################################
# FASTA file utilities
######################
# {{{
def quick_FASTA_reader(file):
"""Simple FASTA reader, returning a list of string tuples.
The single argument 'file' should be the filename of a FASTA format file.
This function will open and read in the entire file, constructing a list
of all the records, each held as a tuple of strings (the sequence name or
title, and its sequence).
This function was originally intended for use on large files, where its
low overhead makes it very fast. However, because it returns the data as
a single in memory list, this can require a lot of RAM on large files.
You are generally encouraged to use Bio.SeqIO.parse(handle, "fasta") which
allows you to iterate over the records one by one (avoiding having all the
records in memory at once). Using Bio.SeqIO also makes it easy to switch
between different input file formats. However, please note that rather
than simple strings, Bio.SeqIO uses SeqRecord objects for each record.
"""
#Want to split on "\n>" not just ">" in case there are any extra ">"
#in the name/description. So, in order to make sure we also split on
#the first entry, prepend a "\n" to the start of the file.
handle = open(file)
txt = "\n" + handle.read()
handle.close()
entries = []
for entry in txt.split('\n>')[1:]:
name,seq= entry.split('\n',1)
seq = seq.replace('\n','').replace(' ','').upper()
entries.append((name, seq))
return entries
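# For comparison, the Bio.SeqIO route recommended in the docstring above looks
# roughly like this (illustrative sketch; "example.fasta" is a placeholder and
# the records are SeqRecord objects rather than plain string tuples):
#     from Bio import SeqIO
#     for record in SeqIO.parse(open("example.fasta"), "fasta"):
#         print record.id, len(record.seq)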
# }}}
def _test():
"""Run the Bio.SeqUtils module's doctests (PRIVATE)."""
    print "Running doctests..."
import doctest
doctest.testmod()
print "Done"
if __name__ == "__main__":
_test()
|
bsd-2-clause
|
polaris-gslb/polaris-core
|
polaris_common/topology.py
|
2
|
2452
|
# -*- coding: utf-8 -*-
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
"""
args:
topology_config: dict
{
'region1': [
'10.1.1.0/24',
'10.1.10.0/24',
'172.16.1.0/24'
],
'region2': [
'192.168.1.0/24',
'10.2.0.0/16',
]
}
Region cannot be "_default"
returns:
topology_map: dict
{
ip_network('10.1.1.0/24'): 'region1',
ip_network('10.1.10.0/24'): 'region1',
ip_network('172.16.1.0/24'): 'region1',
ip_network('192.168.1.0/24'): 'region2',
ip_network('10.2.0.0/16'): 'region2',
}
raises:
ValueError: if a region value is "_default"
"""
topology_map = {}
for region in topology_config:
# "_default" cannot be used as a region name
if region == '_default':
raise ValueError('cannot use "_default" as a region name')
for net_str in topology_config[region]:
net = ipaddress.ip_network(net_str)
topology_map[net] = region
return topology_map
def get_region(ip_str, topology_map):
    """Return the name of a region from the topology map for
    the given IP address. If multiple networks contain the IP,
    the region of the most specific (longest prefix length) match is
    returned; if several matches share the same prefix length, which
    entry is returned is undefined.
args:
ip_str: string representing an IP address
returns:
string: region name
None: if no region has been found
raises:
ValueError: raised by ipaddress if ip_str isn't a valid IP address
"""
ip = ipaddress.ip_address(ip_str)
# find all the matching networks
matches = []
for net in topology_map:
if ip in net:
matches.append(net)
# if only a single match is found return it
if len(matches) == 1:
return topology_map[matches[0]]
# if more than 1 match is found, sort the matches
# by prefixlen, return the longest prefixlen entry
elif len(matches) > 1:
matches.sort(key=lambda net: net.prefixlen)
return topology_map[matches[-1]]
# no matches found
return None
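# Minimal usage sketch (not part of the original module): build a map from a
# small hand-written config and look up an address covered by two networks,
# so the longest-prefix entry wins.
if __name__ == '__main__':
    example_config = {
        'region1': ['10.0.0.0/8'],
        'region2': ['10.1.1.0/24'],
    }
    example_map = config_to_map(example_config)
    # 10.1.1.25 is inside both 10.0.0.0/8 and 10.1.1.0/24; the /24 is more
    # specific, so 'region2' is printed.
    print(get_region('10.1.1.25', example_map))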
|
bsd-3-clause
|
40223102/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/constants.py
|
603
|
15297
|
#!/usr/bin/env python
'''Constants defined by SDL, and needed in pygame.
Note that many of the flags for SDL are not needed in pygame, and are not
included here. These constants are generally accessed from the
`pygame.locals` module. This module is automatically placed in the pygame
namespace, but you will usually want to place them directly into your module's
namespace with the following command::
from pygame.locals import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
#import SDL.constants
# SDL constants taken from https://wiki.libsdl.org/SDLKeycodeLookup
'''
YV12_OVERLAY = SDL.constants.SDL_YV12_OVERLAY
IYUV_OVERLAY = SDL.constants.SDL_IYUV_OVERLAY
YUY2_OVERLAY = SDL.constants.SDL_YUY2_OVERLAY
UYVY_OVERLAY = SDL.constants.SDL_UYVY_OVERLAY
YVYU_OVERLAY = SDL.constants.SDL_YVYU_OVERLAY
SWSURFACE = SDL.constants.SDL_SWSURFACE
HWSURFACE = SDL.constants.SDL_HWSURFACE
RESIZABLE = SDL.constants.SDL_RESIZABLE
ASYNCBLIT = SDL.constants.SDL_ASYNCBLIT
OPENGL = SDL.constants.SDL_OPENGL
OPENGLBLIT = SDL.constants.SDL_OPENGLBLIT
ANYFORMAT = SDL.constants.SDL_ANYFORMAT
HWPALETTE = SDL.constants.SDL_HWPALETTE
DOUBLEBUF = SDL.constants.SDL_DOUBLEBUF
#FULLSCREEN = SDL.constants.SDL_FULLSCREEN
'''
FULLSCREEN = 0
'''
HWACCEL = SDL.constants.SDL_HWACCEL
SRCCOLORKEY = SDL.constants.SDL_SRCCOLORKEY
'''
RLEACCELOK = 254
RLEACCEL = 255
'''
SRCALPHA = SDL.constants.SDL_SRCALPHA
PREALLOC = SDL.constants.SDL_PREALLOC
NOFRAME = SDL.constants.SDL_NOFRAME
GL_RED_SIZE = SDL.constants.SDL_GL_RED_SIZE
GL_GREEN_SIZE = SDL.constants.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = SDL.constants.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = SDL.constants.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = SDL.constants.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = SDL.constants.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = SDL.constants.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = SDL.constants.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = SDL.constants.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = SDL.constants.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = SDL.constants.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = SDL.constants.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = SDL.constants.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = SDL.constants.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = SDL.constants.SDL_GL_MULTISAMPLESAMPLES
TIMER_RESOLUTION = SDL.constants.TIMER_RESOLUTION
AUDIO_U8 = SDL.constants.AUDIO_U8
AUDIO_S8 = SDL.constants.AUDIO_S8
AUDIO_U16LSB = SDL.constants.AUDIO_U16LSB
AUDIO_S16LSB = SDL.constants.AUDIO_S16LSB
AUDIO_U16MSB = SDL.constants.AUDIO_U16MSB
AUDIO_S16MSB = SDL.constants.AUDIO_S16MSB
AUDIO_U16 = SDL.constants.AUDIO_U16
AUDIO_S16 = SDL.constants.AUDIO_S16
AUDIO_U16SYS = SDL.constants.AUDIO_U16SYS
AUDIO_S16SYS = SDL.constants.AUDIO_S16SYS
'''
def _t(a, b, c, d):
return (ord(a) << 24) | (ord(b) << 16) | (ord(c) << 8) | ord(d)
SCRAP_TEXT = _t('T', 'E', 'X', 'T')
SCRAP_BMP = _t('B', 'M', 'P', ' ')
BLEND_ADD = 0x01
BLEND_SUB = 0x02
BLEND_MULT = 0x03
BLEND_MIN = 0x04
BLEND_MAX = 0x05
"""
NOEVENT = SDL.constants.SDL_NOEVENT
ACTIVEEVENT = SDL.constants.SDL_ACTIVEEVENT
KEYDOWN = SDL.constants.SDL_KEYDOWN
KEYUP = SDL.constants.SDL_KEYUP
MOUSEMOTION = SDL.constants.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = SDL.constants.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = SDL.constants.SDL_MOUSEBUTTONUP
JOYAXISMOTION = SDL.constants.SDL_JOYAXISMOTION
JOYBALLMOTION = SDL.constants.SDL_JOYBALLMOTION
JOYHATMOTION = SDL.constants.SDL_JOYHATMOTION
JOYBUTTONDOWN = SDL.constants.SDL_JOYBUTTONDOWN
JOYBUTTONUP = SDL.constants.SDL_JOYBUTTONUP
VIDEORESIZE = SDL.constants.SDL_VIDEORESIZE
VIDEOEXPOSE = SDL.constants.SDL_VIDEOEXPOSE
QUIT = SDL.constants.SDL_QUIT
SYSWMEVENT = SDL.constants.SDL_SYSWMEVENT
USEREVENT = SDL.constants.SDL_USEREVENT
NUMEVENTS = SDL.constants.SDL_NUMEVENTS
HAT_CENTERED = SDL.constants.SDL_HAT_CENTERED
HAT_UP = SDL.constants.SDL_HAT_UP
HAT_RIGHTUP = SDL.constants.SDL_HAT_RIGHTUP
HAT_RIGHT = SDL.constants.SDL_HAT_RIGHT
HAT_RIGHTDOWN = SDL.constants.SDL_HAT_RIGHTDOWN
HAT_DOWN = SDL.constants.SDL_HAT_DOWN
HAT_LEFTDOWN = SDL.constants.SDL_HAT_LEFTDOWN
HAT_LEFT = SDL.constants.SDL_HAT_LEFT
HAT_LEFTUP = SDL.constants.SDL_HAT_LEFTUP
"""
#BEGIN GENERATED CONSTANTS; see support/make_pygame_keyconstants.py
K_0 = 48
K_1 = 49
K_2 = 50
K_3 = 51
K_4 = 52
K_5 = 53
K_6 = 54
K_7 = 55
K_8 = 56
K_9 = 57
K_AMPERSAND = 38
K_ASTERISK = 42
K_AT = 64
K_BACKQUOTE = 96
K_BACKSLASH = 92
K_BACKSPACE = 8
#K_BREAK = SDL.constants.SDLK_BREAK
K_CAPSLOCK = 1073741881
K_CARET = 94
K_CLEAR = 1073742040
K_COLON = 58
K_COMMA = 44
#K_COMPOSE = SDL.constants.SDLK_COMPOSE
K_DELETE = 127
K_DOLLAR = 36
K_DOWN = 1073741905
K_END = 1073741901
K_EQUALS = 1073741927
K_ESCAPE = 27
#K_EURO = SDL.constants.SDLK_EURO
K_EXCLAIM = 33
K_F1 = 1073741882
K_F10 = 1073741891
K_F11 = 1073741892
K_F12 = 1073741893
K_F13 = 1073741928
K_F14 = 1073741929
K_F15 = 1073741930
K_F2 = 1073741883
K_F3 = 1073741884
K_F4 = 1073741885
K_F5 = 1073741886
K_F6 = 1073741887
K_F7 = 1073741888
K_F8 = 1073741889
K_F9 = 1073741890
#K_FIRST = SDL.constants.SDLK_FIRST
K_GREATER = 1073742022
K_HASH = 1073742028
K_HELP = 1073741941
K_HOME = 1073741898
K_INSERT = 1073741897
K_KP0 = 1073741922
K_KP1 = 1073741913
K_KP2 = 1073741914
K_KP3 = 1073741915
K_KP4 = 1073741916
K_KP5 = 1073741917
K_KP6 = 1073741918
K_KP7 = 1073741919
K_KP8 = 1073741920
K_KP9 = 1073741921
K_KP_DIVIDE = 1073741908
K_KP_ENTER = 1073741912
K_KP_EQUALS = 1073741927
K_KP_MINUS = 1073741910
K_KP_MULTIPLY = 1073741909
K_KP_PERIOD = 1073741923
K_KP_PLUS = 1073741911
K_LALT = 1073742050
#K_LAST = SDL.constants.SDLK_LAST
K_LCTRL = 1073742048
K_LEFT = 1073741904
#K_LEFTBRACKET = SDL.constants.SDLK_LEFTBRACKET
K_LEFTPAREN = 1073742006
#K_LESS = SDL.constants.SDLK_LESS
#K_LMETA = SDL.constants.SDLK_LMETA
K_LSHIFT = 1073742049
#K_LSUPER = SDL.constants.SDLK_LSUPER
K_MENU = 1073741942
K_MINUS = 45
K_MODE = 1073742081
#K_NUMLOCK = SDL.constants.SDLK_NUMLOCK
K_PAGEDOWN = 1073741902
K_PAGEUP = 1073741899
K_PAUSE = 1073741896
#K_PERIOD = SDL.constants.SDLK_PERIOD
K_PLUS = 43
#K_POWER = SDL.constants.SDLK_POWER
#K_PRINT = SDL.constants.SDLK_PRINT
K_QUESTION = 63
K_QUOTE = 39
K_QUOTEDBL = 34
K_RALT = 1073742054
K_RCTRL = 1073742052
K_RETURN = 13
K_RIGHT = 1073741903
#K_RIGHTBRACKET = SDL.constants.SDLK_RIGHTBRACKET
K_RIGHTPAREN = 41
#K_RMETA = SDL.constants.SDLK_RMETA
K_RSHIFT = 1073742053
#K_RSUPER = SDL.constants.SDLK_RSUPER
K_SCROLLOCK = 1073741895
K_SEMICOLON = 59
K_SLASH = 47
K_SPACE = 1073742029
K_SYSREQ = 1073741978
K_TAB = 9
K_UNDERSCORE = 95
K_UNDO = 1073741946
K_UNKNOWN = 0
K_UP = 1073741906
"""
K_WORLD_0 = SDL.constants.SDLK_WORLD_0
K_WORLD_1 = SDL.constants.SDLK_WORLD_1
K_WORLD_10 = SDL.constants.SDLK_WORLD_10
K_WORLD_11 = SDL.constants.SDLK_WORLD_11
K_WORLD_12 = SDL.constants.SDLK_WORLD_12
K_WORLD_13 = SDL.constants.SDLK_WORLD_13
K_WORLD_14 = SDL.constants.SDLK_WORLD_14
K_WORLD_15 = SDL.constants.SDLK_WORLD_15
K_WORLD_16 = SDL.constants.SDLK_WORLD_16
K_WORLD_17 = SDL.constants.SDLK_WORLD_17
K_WORLD_18 = SDL.constants.SDLK_WORLD_18
K_WORLD_19 = SDL.constants.SDLK_WORLD_19
K_WORLD_2 = SDL.constants.SDLK_WORLD_2
K_WORLD_20 = SDL.constants.SDLK_WORLD_20
K_WORLD_21 = SDL.constants.SDLK_WORLD_21
K_WORLD_22 = SDL.constants.SDLK_WORLD_22
K_WORLD_23 = SDL.constants.SDLK_WORLD_23
K_WORLD_24 = SDL.constants.SDLK_WORLD_24
K_WORLD_25 = SDL.constants.SDLK_WORLD_25
K_WORLD_26 = SDL.constants.SDLK_WORLD_26
K_WORLD_27 = SDL.constants.SDLK_WORLD_27
K_WORLD_28 = SDL.constants.SDLK_WORLD_28
K_WORLD_29 = SDL.constants.SDLK_WORLD_29
K_WORLD_3 = SDL.constants.SDLK_WORLD_3
K_WORLD_30 = SDL.constants.SDLK_WORLD_30
K_WORLD_31 = SDL.constants.SDLK_WORLD_31
K_WORLD_32 = SDL.constants.SDLK_WORLD_32
K_WORLD_33 = SDL.constants.SDLK_WORLD_33
K_WORLD_34 = SDL.constants.SDLK_WORLD_34
K_WORLD_35 = SDL.constants.SDLK_WORLD_35
K_WORLD_36 = SDL.constants.SDLK_WORLD_36
K_WORLD_37 = SDL.constants.SDLK_WORLD_37
K_WORLD_38 = SDL.constants.SDLK_WORLD_38
K_WORLD_39 = SDL.constants.SDLK_WORLD_39
K_WORLD_4 = SDL.constants.SDLK_WORLD_4
K_WORLD_40 = SDL.constants.SDLK_WORLD_40
K_WORLD_41 = SDL.constants.SDLK_WORLD_41
K_WORLD_42 = SDL.constants.SDLK_WORLD_42
K_WORLD_43 = SDL.constants.SDLK_WORLD_43
K_WORLD_44 = SDL.constants.SDLK_WORLD_44
K_WORLD_45 = SDL.constants.SDLK_WORLD_45
K_WORLD_46 = SDL.constants.SDLK_WORLD_46
K_WORLD_47 = SDL.constants.SDLK_WORLD_47
K_WORLD_48 = SDL.constants.SDLK_WORLD_48
K_WORLD_49 = SDL.constants.SDLK_WORLD_49
K_WORLD_5 = SDL.constants.SDLK_WORLD_5
K_WORLD_50 = SDL.constants.SDLK_WORLD_50
K_WORLD_51 = SDL.constants.SDLK_WORLD_51
K_WORLD_52 = SDL.constants.SDLK_WORLD_52
K_WORLD_53 = SDL.constants.SDLK_WORLD_53
K_WORLD_54 = SDL.constants.SDLK_WORLD_54
K_WORLD_55 = SDL.constants.SDLK_WORLD_55
K_WORLD_56 = SDL.constants.SDLK_WORLD_56
K_WORLD_57 = SDL.constants.SDLK_WORLD_57
K_WORLD_58 = SDL.constants.SDLK_WORLD_58
K_WORLD_59 = SDL.constants.SDLK_WORLD_59
K_WORLD_6 = SDL.constants.SDLK_WORLD_6
K_WORLD_60 = SDL.constants.SDLK_WORLD_60
K_WORLD_61 = SDL.constants.SDLK_WORLD_61
K_WORLD_62 = SDL.constants.SDLK_WORLD_62
K_WORLD_63 = SDL.constants.SDLK_WORLD_63
K_WORLD_64 = SDL.constants.SDLK_WORLD_64
K_WORLD_65 = SDL.constants.SDLK_WORLD_65
K_WORLD_66 = SDL.constants.SDLK_WORLD_66
K_WORLD_67 = SDL.constants.SDLK_WORLD_67
K_WORLD_68 = SDL.constants.SDLK_WORLD_68
K_WORLD_69 = SDL.constants.SDLK_WORLD_69
K_WORLD_7 = SDL.constants.SDLK_WORLD_7
K_WORLD_70 = SDL.constants.SDLK_WORLD_70
K_WORLD_71 = SDL.constants.SDLK_WORLD_71
K_WORLD_72 = SDL.constants.SDLK_WORLD_72
K_WORLD_73 = SDL.constants.SDLK_WORLD_73
K_WORLD_74 = SDL.constants.SDLK_WORLD_74
K_WORLD_75 = SDL.constants.SDLK_WORLD_75
K_WORLD_76 = SDL.constants.SDLK_WORLD_76
K_WORLD_77 = SDL.constants.SDLK_WORLD_77
K_WORLD_78 = SDL.constants.SDLK_WORLD_78
K_WORLD_79 = SDL.constants.SDLK_WORLD_79
K_WORLD_8 = SDL.constants.SDLK_WORLD_8
K_WORLD_80 = SDL.constants.SDLK_WORLD_80
K_WORLD_81 = SDL.constants.SDLK_WORLD_81
K_WORLD_82 = SDL.constants.SDLK_WORLD_82
K_WORLD_83 = SDL.constants.SDLK_WORLD_83
K_WORLD_84 = SDL.constants.SDLK_WORLD_84
K_WORLD_85 = SDL.constants.SDLK_WORLD_85
K_WORLD_86 = SDL.constants.SDLK_WORLD_86
K_WORLD_87 = SDL.constants.SDLK_WORLD_87
K_WORLD_88 = SDL.constants.SDLK_WORLD_88
K_WORLD_89 = SDL.constants.SDLK_WORLD_89
K_WORLD_9 = SDL.constants.SDLK_WORLD_9
K_WORLD_90 = SDL.constants.SDLK_WORLD_90
K_WORLD_91 = SDL.constants.SDLK_WORLD_91
K_WORLD_92 = SDL.constants.SDLK_WORLD_92
K_WORLD_93 = SDL.constants.SDLK_WORLD_93
K_WORLD_94 = SDL.constants.SDLK_WORLD_94
K_WORLD_95 = SDL.constants.SDLK_WORLD_95
"""
K_a = 97
K_b = 98
K_c = 99
K_d = 100
K_e = 101
K_f = 102
K_g = 103
K_h = 104
K_i = 105
K_j = 106
K_k = 107
K_l = 108
K_m = 109
K_n = 110
K_o = 111
K_p = 112
K_q = 113
K_r = 114
K_s = 115
K_t = 116
K_u = 117
K_v = 118
K_w = 119
K_x = 120
K_y = 121
K_z = 122
#END GENERATED CONSTANTS
|
gpl-3.0
|
DengueTim/linux-rockchip
|
tools/perf/scripts/python/futex-contention.py
|
1997
|
1508
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
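#
# Typical invocation (illustrative; exact tracepoint names depend on the kernel):
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a
#   perf script -s futex-contention.py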
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
fhe-odoo/odoo
|
addons/l10n_multilang/l10n_multilang.py
|
378
|
8428
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import os
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class wizard_multi_charts_accounts(osv.osv_memory):
"""
    Extend the wizard that generates a new account chart for a company:
* Add option to install languages during the setup
* Copy translations for COA, Tax, Tax Code and Fiscal Position from templates to target objects.
"""
_inherit = 'wizard.multi.charts.accounts'
# FIXME: in trunk, drop the force_write param entirely
def process_translations(self, cr, uid, langs, in_obj, in_field, in_ids, out_obj, out_ids, force_write=False, context=None):
"""
        This method copies the translation values of templates into new Accounts/Taxes/Journals for the selected languages.
:param cr: A database cursor
:param uid: ID of the user currently logged in
:param langs: List of languages to load for new records
:param in_field: Name of the translatable field of source templates
:param in_obj: Name of source object of templates.
:param in_ids: List of ids of source object
:param out_obj: Destination object for which translation is to be copied
:param out_ids: List of ids of destination object
:param force_write: Deprecated as of 7.0, do not use
:param context: usual context information. May contain the key 'lang', which is the language of the user running
the wizard, that will be used if force_write is True
:return: True
"""
if context is None:
context = {}
src = {}
xlat_obj = self.pool.get('ir.translation')
#find the source from Account Template
for x in in_obj.browse(cr, uid, in_ids):
src.update({x.id: x.name})
for lang in langs:
#find the value from Translation
value = xlat_obj._get_ids(cr, uid, in_obj._name + ',' + in_field, 'model', lang, in_ids)
for j in range(len(in_ids)):
in_id = in_ids[j]
if value[in_id]:
#copy Translation from Source to Destination object
xlat_obj.create(cr, uid, {
'name': out_obj._name + ',' + in_field,
'type': 'model',
'res_id': out_ids[j],
'lang': lang,
'src': src[in_id],
'value': value[in_id],
})
else:
_logger.info('Language: %s. Translation from template: there is no translation available for %s!' %(lang, src[in_id]))#out_obj._name))
return True
def execute(self, cr, uid, ids, context=None):
if not context:
context = {}
# remove the lang to get the untranslated value
ctx = dict(context, lang=None)
res = super(wizard_multi_charts_accounts, self).execute(cr, uid, ids, context=ctx)
obj_multi = self.browse(cr, uid, ids[0], context=context)
company_id = obj_multi.company_id.id
# load languages
langs = []
res_lang_obj = self.pool.get('res.lang')
installed_lang_ids = res_lang_obj.search(cr, uid, [])
installed_langs = [x.code for x in res_lang_obj.browse(cr, uid, installed_lang_ids, context=context)]
if obj_multi.chart_template_id.spoken_languages:
for lang in obj_multi.chart_template_id.spoken_languages.split(';'):
if lang not in installed_langs:
# the language is not installed, so we don't need to load its translations
continue
else:
# the language was already installed, so the po files have been loaded at the installation time
# and now we need to copy the translations of templates to the right objects
langs.append(lang)
if langs:
# write account.account translations in the real COA
self._process_accounts_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
# copy account.tax.code translations
self._process_tax_codes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
# copy account.tax translations
self._process_taxes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
# copy account.fiscal.position translations
self._process_fiscal_pos_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
return res
def _process_accounts_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
obj_acc_template = self.pool.get('account.account.template')
obj_acc = self.pool.get('account.account')
acc_template_root_id = obj_multi.chart_template_id.account_root_id.id
acc_root_id = obj_acc.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
in_ids = obj_acc_template.search(cr, uid, [('id', 'child_of', [acc_template_root_id])], order='id')[1:]
out_ids = obj_acc.search(cr, uid, [('id', 'child_of', [acc_root_id])], order='id')[1:]
return self.process_translations(cr, uid, langs, obj_acc_template, field, in_ids, obj_acc, out_ids, context=context)
def _process_tax_codes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_root_id = obj_multi.chart_template_id.tax_code_root_id.id
tax_code_root_id = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
in_ids = obj_tax_code_template.search(cr, uid, [('id', 'child_of', [tax_code_template_root_id])], order='id')[1:]
out_ids = obj_tax_code.search(cr, uid, [('id', 'child_of', [tax_code_root_id])], order='id')[1:]
return self.process_translations(cr, uid, langs, obj_tax_code_template, field, in_ids, obj_tax_code, out_ids, context=context)
def _process_taxes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
obj_tax_template = self.pool.get('account.tax.template')
obj_tax = self.pool.get('account.tax')
in_ids = [x.id for x in obj_multi.chart_template_id.tax_template_ids]
out_ids = obj_tax.search(cr, uid, [('company_id', '=', company_id)], order='id')
return self.process_translations(cr, uid, langs, obj_tax_template, field, in_ids, obj_tax, out_ids, context=context)
def _process_fiscal_pos_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
obj_fiscal_position = self.pool.get('account.fiscal.position')
in_ids = obj_fiscal_position_template.search(cr, uid, [('chart_template_id', '=', obj_multi.chart_template_id.id)], order='id')
out_ids = obj_fiscal_position.search(cr, uid, [('company_id', '=', company_id)], order='id')
return self.process_translations(cr, uid, langs, obj_fiscal_position_template, field, in_ids, obj_fiscal_position, out_ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mitmproxy/mitmproxy
|
test/mitmproxy/proxy/layers/http/test_http_version_interop.py
|
2
|
4584
|
from typing import Tuple
import h2.config
import h2.connection
import h2.events
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.connection import Server
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.layers.http.test_http2 import example_request_headers, example_response_headers, make_h2
from test.mitmproxy.proxy.tutils import Placeholder, Playbook, reply
h2f = FrameFactory()
def event_types(events):
return [type(x) for x in events]
def h2_client(tctx: Context) -> Tuple[h2.connection.H2Connection, Playbook]:
tctx.client.alpn = b"h2"
playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
conn = h2.connection.H2Connection()
conn.initiate_connection()
server_preamble = Placeholder(bytes)
assert (
playbook
<< SendData(tctx.client, server_preamble)
)
assert event_types(conn.receive_data(server_preamble())) == [h2.events.RemoteSettingsChanged]
settings_ack = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, conn.data_to_send())
<< SendData(tctx.client, settings_ack)
)
assert event_types(conn.receive_data(settings_ack())) == [h2.events.SettingsAcknowledged]
return conn, playbook
def test_h2_to_h1(tctx):
"""Test HTTP/2 -> HTTP/1 request translation"""
server = Placeholder(Server)
flow = Placeholder(HTTPFlow)
conn, playbook = h2_client(tctx)
conn.send_headers(1, example_request_headers, end_stream=True)
response = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, conn.data_to_send())
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< OpenConnection(server)
>> reply(None)
<< SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
>> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\n")
<< http.HttpResponseHeadersHook(flow)
>> reply()
>> DataReceived(server, b"Hello World!")
<< http.HttpResponseHook(flow)
<< CloseConnection(server)
>> reply(to=-2)
<< SendData(tctx.client, response)
)
events = conn.receive_data(response())
assert event_types(events) == [
h2.events.ResponseReceived, h2.events.DataReceived, h2.events.DataReceived, h2.events.StreamEnded
]
resp: h2.events.ResponseReceived = events[0]
body: h2.events.DataReceived = events[1]
assert resp.headers == [(b':status', b'200'), (b'content-length', b'12')]
assert body.data == b"Hello World!"
def test_h1_to_h2(tctx):
"""Test HTTP/1 -> HTTP/2 request translation"""
server = Placeholder(Server)
flow = Placeholder(HTTPFlow)
playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
conf = h2.config.H2Configuration(client_side=False)
conn = h2.connection.H2Connection(conf)
conn.initiate_connection()
request = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n")
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< OpenConnection(server)
>> reply(None, side_effect=make_h2)
<< SendData(server, request)
)
events = conn.receive_data(request())
assert event_types(events) == [
h2.events.RemoteSettingsChanged, h2.events.RequestReceived, h2.events.StreamEnded
]
conn.send_headers(1, example_response_headers)
conn.send_data(1, b"Hello World!", end_stream=True)
settings_ack = Placeholder(bytes)
assert (
playbook
>> DataReceived(server, conn.data_to_send())
<< http.HttpResponseHeadersHook(flow)
<< SendData(server, settings_ack)
>> reply(to=-2)
<< http.HttpResponseHook(flow)
>> reply()
<< SendData(tctx.client, b"HTTP/1.1 200 OK\r\n\r\nHello World!")
<< CloseConnection(tctx.client)
)
assert settings_ack() == b'\x00\x00\x00\x04\x01\x00\x00\x00\x00'
|
mit
|
boudewijnrempt/breakpad
|
src/tools/gyp/pylib/gyp/sun_tool.py
|
314
|
1569
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-sun-tool when using the Makefile
generator."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = SunTool()
executor.Dispatch(args)
class SunTool(object):
"""This class performs all the SunOS tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bsd-3-clause
|
hbrunn/OCB
|
addons/hr_timesheet_invoice/wizard/hr_timesheet_final_invoice_create.py
|
337
|
3000
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
#
# Create an final invoice based on selected timesheet lines
#
#
# TODO: check unit of measure !!!
#
class final_invoice_create(osv.osv_memory):
_name = 'hr.timesheet.invoice.create.final'
_description = 'Create invoice from timesheet final'
_columns = {
'date': fields.boolean('Date', help='Display date in the history of works'),
'time': fields.boolean('Time Spent', help='Display time in the history of works'),
'name': fields.boolean('Log of Activity', help='Display detail of work in the invoice line.'),
'price': fields.boolean('Cost', help='Display cost of the item you reinvoice'),
'product': fields.many2one('product.product', 'Product', help='The product that will be used to invoice the remaining amount'),
}
def do_create(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
# hack for fixing small issue (context should not propagate implicitly between actions)
if 'default_type' in context:
del context['default_type']
ids = self.pool.get('account.analytic.line').search(cr, uid, [('invoice_id','=',False),('to_invoice','<>', False), ('account_id', 'in', context['active_ids'])], context=context)
invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, ids, data, context=context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)[0]
res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)['res_id']
act_win = act_obj.read(cr, uid, [res_id], context=context)[0]
act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
act_win['name'] = _('Invoices')
return act_win
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
wrouesnel/ansible
|
test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py
|
160
|
2146
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
result['location'] = 'role: bar'
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
poldrack/myconnectome
|
myconnectome/openfmri/washu_fieldmap.py
|
2
|
1085
|
import json,glob,os
import dicom
outdir='/scratch/01329/poldrack/selftracking/ds031/sub00001/ses105/fieldmap'
washubase='/scratch/01329/poldrack/selftracking/washu'
basedirs=['vc39556','vc39556_2']
fmseries=[[5,10,21],[5,17]]
ctr=1
for i in range(2):
basedir=os.path.join(washubase,basedirs[i])
for j in range(len(fmseries[i])):
seriesnum=fmseries[i][j]
dcmfile=glob.glob('%s/DICOM/VC*.MR.HEAD_LAUMANN.%04d.*.IMA'%(basedir,seriesnum))[0]
print dcmfile
dcmdata=dicom.read_file(dcmfile)
dcmdict={}
for k in dcmdata.dir():
dd=dcmdata.data_element(k)
try:
dd.value.decode('ascii')
dcmdict[dd.name]=dd.value
except:
try:
dd.value.original_string.decode('ascii')
dcmdict[dd.name]=dd.value
except:
pass
jsonfile='%s/sub00001_ses105_%03d.json'%(outdir,ctr)
ctr+=1
f=open(jsonfile,'w')
f.write(json.dumps(dcmdict,indent=4))
f.close()
|
mit
|
GenericMappingTools/gmt-python
|
pygmt/tests/test_rose.py
|
1
|
5398
|
"""
Tests for rose.
"""
import numpy as np
import pytest
from pygmt import Figure
from pygmt.datasets import load_fractures_compilation
@pytest.fixture(scope="module", name="data")
def fixture_data():
"""
Load the sample numpy array data.
"""
return np.array(
[[40, 60], [60, 300], [20, 180], [30, 190], [60, 90], [40, 110], [80, 125]]
)
@pytest.fixture(scope="module", name="data_fractures_compilation")
def fixture_data_fractures_compilation():
"""
Load the sample fractures compilation dataset which contains fracture
lengths and azimuths as hypothetically digitized from geological maps.
Lengths are stored in the first column, azimuths in the second.
"""
return load_fractures_compilation()
@pytest.mark.mpl_image_compare
def test_rose_data_file(data_fractures_compilation):
"""
Test supplying data from sample dataset.
"""
fig = Figure()
fig.rose(
data=data_fractures_compilation,
region=[0, 1, 0, 360],
sector=15,
diameter="5.5c",
color="blue",
frame=["x0.2g0.2", "y30g30", "+glightgray"],
pen="1p",
norm="",
scale=0.4,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_2d_array_single():
"""
Test supplying a 2D numpy array containing a single pair of lengths and
directions.
"""
data = np.array([[40, 60]])
fig = Figure()
fig.rose(
data=data,
region=[0, 1, 0, 360],
sector=10,
diameter="5.5c",
color="cyan",
frame=["x0.2g0.2", "y30g30", "+glightgray"],
pen="1p",
norm=True,
scale=0.4,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_2d_array_multiple(data):
"""
Test supplying a 2D numpy array containing a list of lengths and
directions.
"""
fig = Figure()
fig.rose(
data=data,
region=[0, 1, 0, 360],
sector=10,
diameter="5.5c",
color="blue",
frame=["x0.2g0.2", "y30g30", "+gmoccasin"],
pen="1p",
norm=True,
scale=0.4,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_plot_data_using_cpt(data):
"""
Test supplying a 2D numpy array containing a list of lengths and
directions.
Use a cmap to color sectors.
"""
fig = Figure()
fig.rose(
data=data,
region=[0, 1, 0, 360],
sector=15,
diameter="5.5c",
cmap="batlow",
frame=["x0.2g0.2", "y30g30", "+gdarkgray"],
pen="1p",
norm=True,
scale=0.4,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_plot_with_transparency(data_fractures_compilation):
"""
Test supplying the sample fractures compilation dataset to the data
parameter.
Use transparency.
"""
fig = Figure()
fig.rose(
data=data_fractures_compilation,
region=[0, 1, 0, 360],
sector=15,
diameter="5.5c",
color="blue",
frame=["x0.2g0.2", "y30g30", "+glightgray"],
pen="1p",
norm=True,
scale=0.4,
transparency=50,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_no_sectors(data_fractures_compilation):
"""
Test supplying the sample fractures compilation dataset to the data
parameter.
Plot data without defining a sector width, add a title and rename labels.
"""
fig = Figure()
fig.rose(
data=data_fractures_compilation,
region=[0, 500, 0, 360],
diameter="10c",
labels="180/0/90/270",
frame=["xg100", "yg45", "+t'Windrose diagram'"],
pen="1.5p,red3",
transparency=40,
scale=0.5,
)
return fig
@pytest.mark.mpl_image_compare
def test_rose_bools(data_fractures_compilation):
"""
Test supplying the sample fractures compilation dataset to the data
parameter.
Test bools.
"""
fig = Figure()
fig.rose(
data=data_fractures_compilation,
region=[0, 1, 0, 360],
sector=10,
diameter="10c",
frame=["x0.2g0.2", "y30g30", "+glightgray"],
color="red3",
pen="1p",
orientation=False,
norm=True,
vectors=True,
no_scale=True,
shift=False,
)
return fig
@pytest.mark.mpl_image_compare(filename="test_rose_bools.png")
def test_rose_deprecate_columns_to_incols(data_fractures_compilation):
"""
Make sure that the old parameter "columns" is supported and it reports a
warning.
Modified from the test_rose_bools() test.
"""
# swap data column order of the sample fractures compilation dataset,
# as the use of the 'columns' parameter will reverse this action
data = data_fractures_compilation[["azimuth", "length"]]
fig = Figure()
with pytest.warns(expected_warning=FutureWarning) as record:
fig.rose(
data=data,
region=[0, 1, 0, 360],
sector=10,
columns=[1, 0],
diameter="10c",
frame=["x0.2g0.2", "y30g30", "+glightgray"],
color="red3",
pen="1p",
orientation=False,
norm=True,
vectors=True,
no_scale=True,
shift=False,
)
assert len(record) == 1 # check that only one warning was raised
return fig
|
bsd-3-clause
|
SomethingExplosive/android_external_chromium_org
|
media/tools/layout_tests/layouttests.py
|
144
|
8952
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Layout tests module that is necessary for the layout analyzer.
Layout tests are stored in an SVN repository and LayoutTestCaseManager collects
these layout test cases (including description).
"""
import copy
import csv
import locale
import re
import sys
import urllib2
import pysvn
# LayoutTests SVN root location.
DEFAULT_LAYOUTTEST_LOCATION = (
'http://src.chromium.org/blink/trunk/LayoutTests/')
# LayoutTests SVN view link
DEFAULT_LAYOUTTEST_SVN_VIEW_LOCATION = (
'http://src.chromium.org/viewvc/blink/trunk/LayoutTests/')
# When parsing the test HTML file and finding the test description,
# this script tries to find the test description using sentences
# starting with these keywords. This is ad hoc, but it is the only way
# since there is no standard for writing test descriptions.
KEYWORDS_FOR_TEST_DESCRIPTION = ['This test', 'Tests that', 'Test ']
# If cannot find the keywords, this script tries to find test case
# description by the following tags.
TAGS_FOR_TEST_DESCRIPTION = ['title', 'p', 'div']
# If cannot find the tags, this script tries to find the test case
# description in the sentence containing following words.
KEYWORD_FOR_TEST_DESCRIPTION_FAIL_SAFE = ['PASSED ', 'PASS:']
class LayoutTests(object):
"""A class to store test names in layout tests.
The test names (including regular expression patterns) are read from a CSV
file and used for getting layout test names from repository.
"""
def __init__(self, layouttest_root_path=DEFAULT_LAYOUTTEST_LOCATION,
parent_location_list=None, filter_names=None,
recursion=False):
"""Initialize LayoutTests using root and CSV file.
Args:
layouttest_root_path: A location string where layout tests are stored.
parent_location_list: A list of parent directories that are needed for
getting layout tests.
filter_names: A list of test name patterns that are used for filtering
test names (e.g., media/*.html).
recursion: a boolean indicating whether the test names are sought
recursively.
"""
if layouttest_root_path.startswith('http://'):
name_map = self.GetLayoutTestNamesFromSVN(parent_location_list,
layouttest_root_path,
recursion)
else:
# TODO(imasaki): support other forms such as CSV for reading test names.
pass
self.name_map = copy.copy(name_map)
if filter_names:
# Filter names.
for lt_name in name_map.iterkeys():
match = False
for filter_name in filter_names:
if re.search(filter_name, lt_name):
match = True
break
if not match:
del self.name_map[lt_name]
# We get description only for the filtered names.
for lt_name in self.name_map.iterkeys():
self.name_map[lt_name] = 'No description available'
@staticmethod
  def ExtractTestDescription(txt):
    """Extract the test description from test code in HTML.
Currently, we have 4 rules described in the code below.
(This example falls into rule 1):
<p>
This tests the intrinsic size of a video element is the default
300,150 before metadata is loaded, and 0,0 after
metadata is loaded for an audio-only file.
</p>
    The strategy is very ad hoc since the original test case files
    (in HTML format) do not have a standard way to store test descriptions.
Args:
txt: A HTML text which may or may not contain test description.
Returns:
A string that contains test description. Returns 'UNKNOWN' if the
test description is not found.
"""
# (1) Try to find test description that contains keywords such as
# 'test that' and surrounded by p tag.
# This is the most common case.
for keyword in KEYWORDS_FOR_TEST_DESCRIPTION:
# Try to find <p> and </p>.
pattern = r'<p>(.*' + keyword + '.*)</p>'
matches = re.search(pattern, txt)
if matches is not None:
return matches.group(1).strip()
# (2) Try to find it by using more generic keywords such as 'PASS' etc.
for keyword in KEYWORD_FOR_TEST_DESCRIPTION_FAIL_SAFE:
# Try to find new lines.
pattern = r'\n(.*' + keyword + '.*)\n'
matches = re.search(pattern, txt)
if matches is not None:
# Remove 'p' tag.
text = matches.group(1).strip()
return text.replace('<p>', '').replace('</p>', '')
# (3) Try to find it by using HTML tag such as title.
for tag in TAGS_FOR_TEST_DESCRIPTION:
pattern = r'<' + tag + '>(.*)</' + tag + '>'
matches = re.search(pattern, txt)
if matches is not None:
return matches.group(1).strip()
# (4) Try to find it by using test description and remove 'p' tag.
for keyword in KEYWORDS_FOR_TEST_DESCRIPTION:
# Try to find <p> and </p>.
pattern = r'\n(.*' + keyword + '.*)\n'
matches = re.search(pattern, txt)
if matches is not None:
# Remove 'p' tag.
text = matches.group(1).strip()
return text.replace('<p>', '').replace('</p>', '')
# (5) cannot find test description using existing rules.
return 'UNKNOWN'
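  # Illustrative example (not from the original file): with no keyword sentence
  # present, rule (3) falls back to the <title> tag, e.g.
  #   LayoutTests.ExtractTestDescription('<title>video seek test</title>')
  #   returns 'video seek test'.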
@staticmethod
def GetLayoutTestNamesFromSVN(parent_location_list,
layouttest_root_path, recursion):
"""Get LayoutTest names from SVN.
Args:
parent_location_list: a list of locations of parent directories. This is
used when getting layout tests using PySVN.list().
layouttest_root_path: the root path of layout tests directory.
recursion: a boolean indicating whether the test names are sought
recursively.
Returns:
a map containing test names as keys for de-dupe.
"""
client = pysvn.Client()
# Get directory structure in the repository SVN.
name_map = {}
for parent_location in parent_location_list:
if parent_location.endswith('/'):
full_path = layouttest_root_path + parent_location
try:
file_list = client.list(full_path, recurse=recursion)
for file_name in file_list:
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding
else:
default_encoding = locale.getpreferredencoding()
file_name = file_name[0].repos_path.encode(default_encoding)
            # Remove the prefix '/trunk/LayoutTests/'.
file_name = file_name.replace('/trunk/LayoutTests/', '')
if file_name.endswith('.html'):
name_map[file_name] = True
except:
print 'Unable to list tests in %s.' % full_path
return name_map
@staticmethod
def GetLayoutTestNamesFromCSV(csv_file_path):
"""Get layout test names from CSV file.
Args:
csv_file_path: the path for the CSV file containing test names (including
regular expression patterns). The CSV file content has one column and
each row contains a test name.
Returns:
a list of test names in string.
"""
file_object = file(csv_file_path, 'r')
reader = csv.reader(file_object)
names = [row[0] for row in reader]
file_object.close()
return names
@staticmethod
def GetParentDirectoryList(names):
"""Get parent directory list from test names.
Args:
names: a list of test names. The test names also have path information as
well (e.g., media/video-zoom.html).
Returns:
a list of parent directories for the given test names.
"""
pd_map = {}
for name in names:
p_dir = name[0:name.rfind('/') + 1]
pd_map[p_dir] = True
return list(pd_map.iterkeys())
def JoinWithTestExpectation(self, test_expectations):
"""Join layout tests with the test expectation file using test name as key.
Args:
test_expectations: a test expectations object.
Returns:
test_info_map contains test name as key and another map as value. The
other map contains test description and the test expectation
information which contains keyword (e.g., 'GPU') as key (we do
not care about values). The map data structure is used since we
have to look up these keywords several times.
"""
test_info_map = {}
for (lt_name, desc) in self.name_map.items():
test_info_map[lt_name] = {}
test_info_map[lt_name]['desc'] = desc
for (te_name, te_info) in (
test_expectations.all_test_expectation_info.items()):
if te_name == lt_name or (
te_name in lt_name and te_name.endswith('/')):
# Only keep the first match when found.
test_info_map[lt_name]['te_info'] = te_info
break
return test_info_map
|
bsd-3-clause
|
ibmsoe/tensorflow
|
tensorflow/python/saved_model/signature_constants.py
|
119
|
2635
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signature constants for SavedModel save and restore operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
# Key in the signature def map for `default` serving signatures. The default
# signature is used in inference requests where a specific signature was not
# specified.
DEFAULT_SERVING_SIGNATURE_DEF_KEY = "serving_default"
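# Illustrative sketch (not part of this module): the default key is typically
# used when building the signature_def_map passed to a SavedModel builder,
# e.g.
#   signature_def_map = {
#       DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def,
#   }
# where predict_signature_def is a SignatureDef built elsewhere.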
################################################################################
# Classification API constants.
# Classification inputs.
CLASSIFY_INPUTS = "inputs"
# Classification method name used in a SignatureDef.
CLASSIFY_METHOD_NAME = "tensorflow/serving/classify"
# Classification classes output.
CLASSIFY_OUTPUT_CLASSES = "classes"
# Classification scores output.
CLASSIFY_OUTPUT_SCORES = "scores"
################################################################################
# Prediction API constants.
# Predict inputs.
PREDICT_INPUTS = "inputs"
# Prediction method name used in a SignatureDef.
PREDICT_METHOD_NAME = "tensorflow/serving/predict"
# Predict outputs.
PREDICT_OUTPUTS = "outputs"
################################################################################
# Regression API constants.
# Regression inputs.
REGRESS_INPUTS = "inputs"
# Regression method name used in a SignatureDef.
REGRESS_METHOD_NAME = "tensorflow/serving/regress"
# Regression outputs.
REGRESS_OUTPUTS = "outputs"
################################################################################
_allowed_symbols = [
"DEFAULT_SERVING_SIGNATURE_DEF_KEY",
"CLASSIFY_INPUTS",
"CLASSIFY_METHOD_NAME",
"CLASSIFY_OUTPUT_CLASSES",
"CLASSIFY_OUTPUT_SCORES",
"PREDICT_INPUTS",
"PREDICT_METHOD_NAME",
"PREDICT_OUTPUTS",
"REGRESS_INPUTS",
"REGRESS_METHOD_NAME",
"REGRESS_OUTPUTS",
]
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
ennoborg/gramps
|
gramps/gui/views/treemodels/peoplemodel.py
|
1
|
23073
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2009-2010 Nick Hall
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
TreeModel for the Gramps Person tree.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from html import escape
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger(".")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import (Name, EventRef, EventType, EventRoleType,
FamilyRelType, ChildRefType, NoteType)
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.datehandler import format_time, get_date, get_date_valid
from .flatbasemodel import FlatBaseModel
from .treebasemodel import TreeBaseModel
from .basemodel import BaseModel
from gramps.gen.config import config
#-------------------------------------------------------------------------
#
# COLUMN constants; positions in raw data structure
#
#-------------------------------------------------------------------------
COLUMN_ID = 1
COLUMN_GENDER = 2
COLUMN_NAME = 3
COLUMN_DEATH = 5
COLUMN_BIRTH = 6
COLUMN_EVENT = 7
COLUMN_FAMILY = 8
COLUMN_PARENT = 9
COLUMN_NOTES = 16
COLUMN_CHANGE = 17
COLUMN_TAGS = 18
COLUMN_PRIV = 19
invalid_date_format = config.get('preferences.invalid-date-format')
#-------------------------------------------------------------------------
#
# PeopleBaseModel
#
#-------------------------------------------------------------------------
class PeopleBaseModel(BaseModel):
"""
Basic Model interface to handle the PersonViews
"""
_GENDER = [ _('female'), _('male'), _('unknown') ]
def __init__(self, db):
"""
Initialize the model building the initial data
"""
BaseModel.__init__(self)
self.db = db
self.gen_cursor = db.get_person_cursor
self.map = db.get_raw_person_data
self.fmap = [
self.column_name,
self.column_id,
self.column_gender,
self.column_birth_day,
self.column_birth_place,
self.column_death_day,
self.column_death_place,
self.column_spouse,
self.column_parents,
self.column_marriages,
self.column_children,
self.column_todo,
self.column_private,
self.column_tags,
self.column_change,
self.column_tag_color,
]
self.smap = [
self.sort_name,
self.column_id,
self.column_gender,
self.sort_birth_day,
self.column_birth_place,
self.sort_death_day,
self.column_death_place,
self.column_spouse,
self.sort_parents,
self.sort_marriages,
self.sort_children,
self.sort_todo,
self.column_private,
self.column_tags,
self.sort_change,
self.column_tag_color,
]
def destroy(self):
"""
Unset all elements that can prevent garbage collection
"""
BaseModel.destroy(self)
self.db = None
self.gen_cursor = None
self.map = None
self.fmap = None
self.smap = None
def color_column(self):
"""
Return the color column.
"""
return 15
def on_get_n_columns(self):
""" Return the number of columns in the model """
return len(self.fmap)+1
def sort_name(self, data):
handle = data[0]
cached, name = self.get_cached_value(handle, "SORT_NAME")
if not cached:
name = name_displayer.raw_sorted_name(data[COLUMN_NAME])
# internally we work with utf-8
if not isinstance(name, str):
name = name.decode('utf-8')
self.set_cached_value(handle, "SORT_NAME", name)
return name
def column_name(self, data):
handle = data[0]
cached, name = self.get_cached_value(handle, "NAME")
if not cached:
name = name_displayer.raw_display_name(data[COLUMN_NAME])
# internally we work with utf-8 for python 2.7
if not isinstance(name, str):
name = name.encode('utf-8')
self.set_cached_value(handle, "NAME", name)
return name
def column_spouse(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SPOUSE")
if not cached:
value = self._get_spouse_data(data)
self.set_cached_value(handle, "SPOUSE", value)
return value
def column_private(self, data):
if data[COLUMN_PRIV]:
return 'gramps-lock'
else:
# There is a problem returning None here.
return ''
def _get_spouse_data(self, data):
spouses_names = ""
for family_handle in data[COLUMN_FAMILY]:
family = self.db.get_family_from_handle(family_handle)
for spouse_id in [family.get_father_handle(),
family.get_mother_handle()]:
if not spouse_id:
continue
if spouse_id == data[0]:
continue
spouse = self.db.get_person_from_handle(spouse_id)
if spouses_names:
spouses_names += ", "
spouses_names += name_displayer.display(spouse)
return spouses_names
def column_id(self, data):
return data[COLUMN_ID]
def sort_change(self,data):
return "%012x" % data[COLUMN_CHANGE]
def column_change(self, data):
return format_time(data[COLUMN_CHANGE])
def column_gender(self, data):
return PeopleBaseModel._GENDER[data[COLUMN_GENDER]]
def column_birth_day(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "BIRTH_DAY")
if not cached:
value = self._get_birth_data(data, False)
self.set_cached_value(handle, "BIRTH_DAY", value)
return value
def sort_birth_day(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_BIRTH_DAY")
if not cached:
value = self._get_birth_data(data, True)
self.set_cached_value(handle, "SORT_BIRTH_DAY", value)
return value
def _get_birth_data(self, data, sort_mode):
index = data[COLUMN_BIRTH]
if index != -1:
try:
local = data[COLUMN_EVENT][index]
b = EventRef()
b.unserialize(local)
birth = self.db.get_event_from_handle(b.ref)
if sort_mode:
retval = "%09d" % birth.get_date_object().get_sort_value()
else:
date_str = get_date(birth)
if date_str != "":
retval = escape(date_str)
if not get_date_valid(birth):
return invalid_date_format % retval
else:
return retval
except:
return ''
for event_ref in data[COLUMN_EVENT]:
er = EventRef()
er.unserialize(event_ref)
event = self.db.get_event_from_handle(er.ref)
etype = event.get_type()
date_str = get_date(event)
if (etype in [EventType.BAPTISM, EventType.CHRISTEN]
and er.get_role() == EventRoleType.PRIMARY
and date_str != ""):
if sort_mode:
retval = "%09d" % event.get_date_object().get_sort_value()
else:
retval = "<i>%s</i>" % escape(date_str)
if not get_date_valid(event):
return invalid_date_format % retval
else:
return retval
return ""
def column_death_day(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "DEATH_DAY")
if not cached:
value = self._get_death_data(data, False)
self.set_cached_value(handle, "DEATH_DAY", value)
return value
def sort_death_day(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_DEATH_DAY")
if not cached:
value = self._get_death_data(data, True)
self.set_cached_value(handle, "SORT_DEATH_DAY", value)
return value
def _get_death_data(self, data, sort_mode):
index = data[COLUMN_DEATH]
if index != -1:
try:
local = data[COLUMN_EVENT][index]
ref = EventRef()
ref.unserialize(local)
event = self.db.get_event_from_handle(ref.ref)
if sort_mode:
retval = "%09d" % event.get_date_object().get_sort_value()
else:
date_str = get_date(event)
if date_str != "":
retval = escape(date_str)
if not get_date_valid(event):
return invalid_date_format % retval
else:
return retval
except:
return ''
for event_ref in data[COLUMN_EVENT]:
er = EventRef()
er.unserialize(event_ref)
event = self.db.get_event_from_handle(er.ref)
etype = event.get_type()
date_str = get_date(event)
if (etype in [EventType.BURIAL,
EventType.CREMATION,
EventType.CAUSE_DEATH]
and er.get_role() == EventRoleType.PRIMARY
and date_str):
if sort_mode:
retval = "%09d" % event.get_date_object().get_sort_value()
else:
retval = "<i>%s</i>" % escape(date_str)
if not get_date_valid(event):
return invalid_date_format % retval
else:
return retval
return ""
def column_birth_place(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "BIRTH_PLACE")
if cached:
return value
else:
index = data[COLUMN_BIRTH]
if index != -1:
try:
local = data[COLUMN_EVENT][index]
br = EventRef()
br.unserialize(local)
event = self.db.get_event_from_handle(br.ref)
if event:
place_title = place_displayer.display_event(self.db, event)
if place_title:
value = escape(place_title)
self.set_cached_value(handle, "BIRTH_PLACE", value)
return value
except:
value = ''
self.set_cached_value(handle, "BIRTH_PLACE", value)
return value
for event_ref in data[COLUMN_EVENT]:
er = EventRef()
er.unserialize(event_ref)
event = self.db.get_event_from_handle(er.ref)
etype = event.get_type()
if (etype in [EventType.BAPTISM, EventType.CHRISTEN] and
er.get_role() == EventRoleType.PRIMARY):
place_title = place_displayer.display_event(self.db, event)
if place_title:
value = "<i>%s</i>" % escape(place_title)
self.set_cached_value(handle, "BIRTH_PLACE", value)
return value
value = ""
self.set_cached_value(handle, "BIRTH_PLACE", value)
return value
def column_death_place(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "DEATH_PLACE")
if cached:
return value
else:
index = data[COLUMN_DEATH]
if index != -1:
try:
local = data[COLUMN_EVENT][index]
dr = EventRef()
dr.unserialize(local)
event = self.db.get_event_from_handle(dr.ref)
if event:
place_title = place_displayer.display_event(self.db, event)
if place_title:
value = escape(place_title)
self.set_cached_value(handle, "DEATH_PLACE", value)
return value
except:
value = ''
self.set_cached_value(handle, "DEATH_PLACE", value)
return value
for event_ref in data[COLUMN_EVENT]:
er = EventRef()
er.unserialize(event_ref)
event = self.db.get_event_from_handle(er.ref)
etype = event.get_type()
if (etype in [EventType.BURIAL, EventType.CREMATION,
EventType.CAUSE_DEATH]
and er.get_role() == EventRoleType.PRIMARY):
place_title = place_displayer.display_event(self.db, event)
if place_title:
value = "<i>%s</i>" % escape(place_title)
self.set_cached_value(handle, "DEATH_PLACE", value)
return value
value = ""
self.set_cached_value(handle, "DEATH_PLACE", value)
return value
def _get_parents_data(self, data):
parents = 0
if data[COLUMN_PARENT]:
family = self.db.get_family_from_handle(data[COLUMN_PARENT][0])
if family.get_father_handle():
parents += 1
if family.get_mother_handle():
parents += 1
return parents
def _get_marriages_data(self, data):
marriages = 0
for family_handle in data[COLUMN_FAMILY]:
family = self.db.get_family_from_handle(family_handle)
if int(family.get_relationship()) == FamilyRelType.MARRIED:
marriages += 1
return marriages
def _get_children_data(self, data):
children = 0
for family_handle in data[COLUMN_FAMILY]:
family = self.db.get_family_from_handle(family_handle)
for child_ref in family.get_child_ref_list():
if (child_ref.get_father_relation() == ChildRefType.BIRTH and
child_ref.get_mother_relation() == ChildRefType.BIRTH):
children += 1
return children
def _get_todo_data(self, data):
todo = 0
for note_handle in data[COLUMN_NOTES]:
note = self.db.get_note_from_handle(note_handle)
if int(note.get_type()) == NoteType.TODO:
todo += 1
return todo
def column_parents(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "PARENTS")
if not cached:
value = self._get_parents_data(data)
self.set_cached_value(handle, "PARENTS", value)
return str(value)
def sort_parents(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_PARENTS")
if not cached:
value = self._get_parents_data(data)
self.set_cached_value(handle, "SORT_PARENTS", value)
return '%06d' % value
def column_marriages(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "MARRIAGES")
if not cached:
value = self._get_marriages_data(data)
self.set_cached_value(handle, "MARRIAGES", value)
return str(value)
def sort_marriages(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_MARRIAGES")
if not cached:
value = self._get_marriages_data(data)
self.set_cached_value(handle, "SORT_MARRIAGES", value)
return '%06d' % value
def column_children(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "CHILDREN")
if not cached:
value = self._get_children_data(data)
self.set_cached_value(handle, "CHILDREN", value)
return str(value)
def sort_children(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_CHILDREN")
if not cached:
value = self._get_children_data(data)
self.set_cached_value(handle, "SORT_CHILDREN", value)
return '%06d' % value
def column_todo(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "TODO")
if not cached:
value = self._get_todo_data(data)
self.set_cached_value(handle, "TODO", value)
return str(value)
def sort_todo(self, data):
handle = data[0]
cached, value = self.get_cached_value(handle, "SORT_TODO")
if not cached:
value = self._get_todo_data(data)
self.set_cached_value(handle, "SORT_TODO", value)
return '%06d' % value
def get_tag_name(self, tag_handle):
"""
Return the tag name from the given tag handle.
"""
cached, value = self.get_cached_value(tag_handle, "TAG_NAME")
if not cached:
tag = self.db.get_tag_from_handle(tag_handle)
if tag:
value = tag.get_name()
self.set_cached_value(tag_handle, "TAG_NAME", value)
return value
def column_tag_color(self, data):
"""
Return the tag color.
"""
tag_handle = data[0]
cached, value = self.get_cached_value(tag_handle, "TAG_COLOR")
if not cached:
tag_color = "#000000000000"
tag_priority = None
for handle in data[COLUMN_TAGS]:
tag = self.db.get_tag_from_handle(handle)
if tag:
this_priority = tag.get_priority()
if tag_priority is None or this_priority < tag_priority:
tag_color = tag.get_color()
tag_priority = this_priority
value = tag_color
self.set_cached_value(tag_handle, "TAG_COLOR", value)
return value
def column_tags(self, data):
"""
Return the sorted list of tags.
"""
handle = data[0]
cached, value = self.get_cached_value(handle, "TAGS")
if not cached:
tag_list = list(map(self.get_tag_name, data[COLUMN_TAGS]))
value = ', '.join(sorted(tag_list, key=glocale.sort_key))
self.set_cached_value(handle, "TAGS", value)
return value
class PersonListModel(PeopleBaseModel, FlatBaseModel):
"""
Listed people model.
"""
def __init__(self, db, uistate, scol=0, order=Gtk.SortType.ASCENDING,
search=None, skip=set(), sort_map=None):
PeopleBaseModel.__init__(self, db)
FlatBaseModel.__init__(self, db, uistate, search=search, skip=skip,
scol=scol, order=order, sort_map=sort_map)
def destroy(self):
"""
Unset all elements that can prevent garbage collection
"""
PeopleBaseModel.destroy(self)
FlatBaseModel.destroy(self)
class PersonTreeModel(PeopleBaseModel, TreeBaseModel):
"""
Hierarchical people model.
"""
def __init__(self, db, uistate, scol=0, order=Gtk.SortType.ASCENDING,
search=None, skip=set(), sort_map=None):
PeopleBaseModel.__init__(self, db)
TreeBaseModel.__init__(self, db, uistate, search=search, skip=skip,
scol=scol, order=order, sort_map=sort_map)
def destroy(self):
"""
Unset all elements that can prevent garbage collection
"""
PeopleBaseModel.destroy(self)
self.number_items = None
TreeBaseModel.destroy(self)
def _set_base_data(self):
"""See TreeBaseModel, we also set some extra lru caches
"""
self.number_items = self.db.get_number_of_people
def get_tree_levels(self):
"""
Return the headings of the levels in the hierarchy.
"""
return [_('Group As'), _('Name')]
def column_header(self, node):
return node.name
def add_row(self, handle, data):
"""
Add nodes to the node map for a single person.
        handle      The handle of the gramps object.
        data        The object data.
"""
ngn = name_displayer.name_grouping_data
name_data = data[COLUMN_NAME]
group_name = ngn(self.db, name_data)
#if isinstance(group_name, str):
# group_name = group_name.encode('utf-8')
sort_key = self.sort_func(data)
#if group_name not in self.group_list:
#self.group_list.append(group_name)
#self.add_node(None, group_name, group_name, None)
# add as node: parent, child, sortkey, handle; parent and child are
# nodes in the treebasemodel, and will be used as iters
self.add_node(group_name, handle, sort_key, handle)
|
gpl-2.0
|
idlead/scikit-learn
|
sklearn/decomposition/fastica_.py
|
54
|
18240
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
    Orthonormalize w with respect to the first j rows of W.
    Parameters
    ----------
    w : ndarray of shape (n,)
        Array to be orthogonalized
    W : ndarray of shape (p, n)
        Null space definition
    j : int < p
        The number of (leading) rows of the null space W with respect to
        which w is orthogonalized.
    Notes
    -----
    Assumes that W is orthogonal; w is changed in place.
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
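# Illustrative sketch (not part of the original module): symmetric
# decorrelation makes the rows of W orthonormal.  For any full-rank square W:
#   W = np.random.RandomState(0).randn(3, 3)
#   W_dec = _sym_decorrelation(W)
#   np.allclose(np.dot(W_dec, W_dec.T), np.eye(3))   # -> True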
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(W, X), fun_args)
W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
        # builtin max, abs are faster than numpy counterparts.
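        # At a fixed point each updated unmixing vector equals the previous one
        # up to sign, so |diag(W1 W^T)| tends to 1; `lim` is the largest deviation.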
lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.')
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
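# Each helper above returns the pair (g(w^T X), E[g'(w^T X)]) consumed by the
# FastICA fixed-point update; e.g. for 'cube', g(u) = u ** 3 and
# g'(u) = 3 * u ** 2, with the expectation taken over samples (last axis).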
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten : boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations to perform.
    tol : float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_X_mean : bool, optional
If True, X_mean is returned too.
compute_sources : bool, optional
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
K : array, shape (n_components, n_features) | None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : array, shape (n_components, n_components)
Estimated un-mixing matrix.
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S : array, shape (n_samples, n_components) | None
Estimated source matrix
X_mean : array, shape (n_features, )
The mean over features. Returned only if return_X_mean is True.
n_iter : int
        If the algorithm is "deflation", n_iter is the maximum number of
        iterations run across all components. Otherwise it is just the number
        of iterations taken to converge. This is returned only when
        return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
random_state = check_random_state(random_state)
fun_args = {} if fun_args is None else fun_args
# make interface compatible with other decompositions
# a copy is required only for non whitened data
X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES).T
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if fun == 'logcosh':
g = _logcosh
elif fun == 'exp':
g = _exp
elif fun == 'cube':
g = _cube
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
else:
exc = ValueError if isinstance(fun, six.string_types) else TypeError
raise exc("Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% fun)
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
        # X must be cast to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components,
n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if whiten:
if compute_sources:
S = fast_dot(fast_dot(W, K), X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return K, W, S, X_mean, n_iter
else:
return K, W, S, X_mean
else:
if return_n_iter:
return K, W, S, n_iter
else:
return K, W, S
else:
if compute_sources:
S = fast_dot(W, X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return None, W, S, None, n_iter
else:
return None, W, S, None
else:
if return_n_iter:
return None, W, S, n_iter
else:
return None, W, S
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA.
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, optional
Maximum number of iterations during fit.
tol : float, optional
Tolerance on update at each iteration.
    w_init : None or an (n_components, n_components) ndarray
The mixing matrix to be used to initialize the algorithm.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : 2D array, shape (n_components, n_features)
The unmixing matrix.
mixing_ : array, shape (n_features, n_components)
The mixing matrix.
n_iter_ : int
        If the algorithm is "deflation", n_iter is the maximum number of
        iterations run across all components. Otherwise it is just the number
        of iterations taken to converge.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
            If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
fun_args = {} if self.fun_args is None else self.fun_args
whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
X=X, n_components=self.n_components, algorithm=self.algorithm,
whiten=self.whiten, fun=self.fun, fun_args=fun_args,
max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
random_state=self.random_state, return_X_mean=True,
compute_sources=compute_sources, return_n_iter=True)
if self.whiten:
self.components_ = np.dot(unmixing, whitening)
self.mean_ = X_mean
self.whitening_ = whitening
else:
self.components_ = unmixing
self.mixing_ = linalg.pinv(self.components_)
if compute_sources:
self.__sources = sources
return sources
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, y=None, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
if self.whiten:
X -= self.mean_
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = fast_dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
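# Illustrative usage sketch (not exercised by the library itself): recover two
# independent sources from a linear mixture with the FastICA estimator defined
# above; the signals and mixing matrix below are arbitrary examples.
if __name__ == '__main__':
    t = np.linspace(0, 8, 2000)
    S_true = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]   # sinusoid + square wave
    A = np.array([[1.0, 0.5], [0.5, 2.0]])                  # mixing matrix
    X_mixed = np.dot(S_true, A.T)                           # observed mixtures
    ica = FastICA(n_components=2, random_state=0)
    S_est = ica.fit_transform(X_mixed)                      # estimated sources
    X_rec = ica.inverse_transform(S_est)                    # reconstructed mixtures
    print("max reconstruction error: %.2e" % np.abs(X_rec - X_mixed).max())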
|
bsd-3-clause
|
prune998/ansible
|
lib/ansible/modules/cloud/softlayer/sl_vm.py
|
27
|
12051
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sl_vm
short_description: create or cancel a virtual instance in SoftLayer
description:
    - Creates or cancels SoftLayer instances. When creating an instance, optionally waits for it to reach the 'running' state.
version_added: "2.1"
options:
instance_id:
description:
      - Instance ID of the virtual instance on which to perform the action
required: false
default: null
hostname:
description:
- Hostname to be provided to a virtual instance
required: false
default: null
domain:
description:
- Domain name to be provided to a virtual instance
required: false
default: null
datacenter:
description:
- Datacenter for the virtual instance to be deployed
required: false
default: null
tags:
description:
- Tag or list of tags to be provided to a virtual instance
required: false
default: null
hourly:
description:
- Flag to determine if the instance should be hourly billed
required: false
default: true
private:
description:
- Flag to determine if the instance should be private only
required: false
default: false
dedicated:
description:
      - Flag to determine if the instance should be deployed in dedicated space
required: false
default: false
local_disk:
description:
- Flag to determine if local disk should be used for the new instance
required: false
default: true
cpus:
description:
- Count of cpus to be assigned to new virtual instance
required: true
default: null
memory:
description:
- Amount of memory to be assigned to new virtual instance
required: true
default: null
disks:
description:
- List of disk sizes to be assigned to new virtual instance
required: true
default: [25]
os_code:
description:
- OS Code to be used for new virtual instance
required: false
default: null
image_id:
description:
- Image Template to be used for new virtual instance
required: false
default: null
nic_speed:
description:
- NIC Speed to be assigned to new virtual instance
required: false
default: 10
public_vlan:
description:
- VLAN by its Id to be assigned to the public NIC
required: false
default: null
private_vlan:
description:
- VLAN by its Id to be assigned to the private NIC
required: false
default: null
ssh_keys:
description:
- List of ssh keys by their Id to be assigned to a virtual instance
required: false
default: null
post_uri:
description:
- URL of a post provisioning script to be loaded and executed on virtual instance
required: false
default: null
state:
description:
- Create, or cancel a virtual instance. Specify "present" for create, "absent" to cancel.
required: false
default: 'present'
wait:
description:
- Flag used to wait for active status before returning
required: false
default: true
  wait_time:
    description:
      - Time in seconds before wait returns
required: false
default: 600
requirements:
- "python >= 2.6"
- "softlayer >= 4.1.1"
author: "Matt Colton (@mcltn)"
'''
EXAMPLES = '''
- name: Build instance
hosts: localhost
gather_facts: False
tasks:
- name: Build instance request
sl_vm:
hostname: instance-1
domain: anydomain.com
datacenter: dal09
tags: ansible-module-test
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks: [25]
os_code: UBUNTU_LATEST
wait: False
- name: Build additional instances
hosts: localhost
gather_facts: False
tasks:
- name: Build instances request
sl_vm:
hostname: "{{ item.hostname }}"
domain: "{{ item.domain }}"
datacenter: "{{ item.datacenter }}"
tags: "{{ item.tags }}"
hourly: "{{ item.hourly }}"
private: "{{ item.private }}"
dedicated: "{{ item.dedicated }}"
local_disk: "{{ item.local_disk }}"
cpus: "{{ item.cpus }}"
memory: "{{ item.memory }}"
disks: "{{ item.disks }}"
os_code: "{{ item.os_code }}"
ssh_keys: "{{ item.ssh_keys }}"
wait: "{{ item.wait }}"
with_items:
- hostname: instance-2
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- hostname: instance-3
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: True
private: False
dedicated: False
local_disk: True
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- name: Cancel instances
hosts: localhost
gather_facts: False
tasks:
- name: Cancel by tag
sl_vm:
state: absent
tags: ansible-module-test
'''
# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
RETURN = '''# '''
import time
#TODO: get this info from API
STATES = ['present', 'absent']
DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'fra02', 'hkg02', 'hou02', 'lon02', 'mel01', 'mex01', 'mil01', 'mon01',
'osl01', 'par01', 'sjc01', 'sjc03', 'sao01', 'sea01', 'sng01', 'syd01', 'tok02', 'tor01', 'wdc01', 'wdc04']
CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
INITIALDISK_SIZES = [25, 100]
LOCALDISK_SIZES = [25, 100, 150, 200, 300]
SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
NIC_SPEEDS = [10, 100, 1000]
try:
import SoftLayer
from SoftLayer import VSManager
HAS_SL = True
vsManager = VSManager(SoftLayer.create_client_from_env())
except ImportError:
HAS_SL = False
def create_virtual_instance(module):
instances = vsManager.list_instances(
hostname = module.params.get('hostname'),
domain = module.params.get('domain'),
datacenter = module.params.get('datacenter')
)
if instances:
return False, None
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
module.params['image_id'] = ''
elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
module.params['os_code'] = ''
module.params['disks'] = [] # Blank out disks since it will use the template
else:
return False, None
tags = module.params.get('tags')
if isinstance(tags, list):
tags = ','.join(map(str, module.params.get('tags')))
instance = vsManager.create_instance(
hostname = module.params.get('hostname'),
domain = module.params.get('domain'),
cpus = module.params.get('cpus'),
memory = module.params.get('memory'),
hourly = module.params.get('hourly'),
datacenter = module.params.get('datacenter'),
os_code = module.params.get('os_code'),
image_id = module.params.get('image_id'),
local_disk = module.params.get('local_disk'),
disks = module.params.get('disks'),
ssh_keys = module.params.get('ssh_keys'),
nic_speed = module.params.get('nic_speed'),
private = module.params.get('private'),
public_vlan = module.params.get('public_vlan'),
private_vlan = module.params.get('private_vlan'),
dedicated = module.params.get('dedicated'),
post_uri = module.params.get('post_uri'),
tags = tags)
if instance is not None and instance['id'] > 0:
return True, instance
else:
return False, None
def wait_for_instance(module,id):
instance = None
completed = False
wait_timeout = time.time() + module.params.get('wait_time')
while not completed and wait_timeout > time.time():
try:
completed = vsManager.wait_for_ready(id, 10, 2)
if completed:
instance = vsManager.get_instance(id)
except:
completed = False
return completed, instance
def cancel_instance(module):
canceled = True
if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
tags = module.params.get('tags')
if isinstance(tags, basestring):
tags = [module.params.get('tags')]
instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
for instance in instances:
try:
vsManager.cancel_instance(instance['id'])
except:
canceled = False
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
try:
            vsManager.cancel_instance(module.params.get('instance_id'))
except:
canceled = False
else:
return False, None
return canceled, None
def main():
module = AnsibleModule(
argument_spec=dict(
instance_id=dict(),
hostname=dict(),
domain=dict(),
datacenter=dict(choices=DATACENTERS),
tags=dict(),
hourly=dict(type='bool', default=True),
private=dict(type='bool', default=False),
dedicated=dict(type='bool', default=False),
local_disk=dict(type='bool', default=True),
cpus=dict(type='int', choices=CPU_SIZES),
memory=dict(type='int', choices=MEMORY_SIZES),
disks=dict(type='list', default=[25]),
os_code=dict(),
image_id=dict(),
nic_speed=dict(type='int', choices=NIC_SPEEDS),
public_vlan=dict(),
private_vlan=dict(),
ssh_keys=dict(type='list', default=[]),
post_uri=dict(),
state=dict(default='present', choices=STATES),
wait=dict(type='bool', default=True),
wait_time=dict(type='int', default=600)
)
)
if not HAS_SL:
module.fail_json(msg='softlayer python library required for this module')
if module.params.get('state') == 'absent':
(changed, instance) = cancel_instance(module)
elif module.params.get('state') == 'present':
(changed, instance) = create_virtual_instance(module)
if module.params.get('wait') is True and instance:
(changed, instance) = wait_for_instance(module, instance['id'])
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
petewarden/tensorflow
|
tensorflow/python/tf_program/mlir_gen.py
|
9
|
17226
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mlir_gen: Generate mlir code from python code."""
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast as ast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import reaching_fndefs
import tensorflow.python.tf_program.pywrap_tfd as tfp
from tensorflow.python.types import core
class SymbolTable(object):
"""Symbol Table for python code."""
def __init__(self):
self.symbols = []
self.enter_scope()
def enter_scope(self):
"""Enter a new scope - at function level."""
self.symbols.append({'types': {}, 'symbols': {}})
self.curr_table = self.symbols[len(self.symbols) - 1]
def insert_symbol(self, name, value):
self.curr_table['symbols'][name] = value
self.curr_table['types'][name] = value.getType()
return value
def insert_type(self, name, type_):
self.curr_table['types'][name] = type_
def exit_scope(self):
self.symbols.pop()
self.curr_table = self.symbols[len(self.symbols) - 1]
def lookup(self, name):
curr_idx = len(self.symbols) - 1
while curr_idx >= 0 and (name not in self.symbols[curr_idx]['symbols']):
curr_idx -= 1
if curr_idx < 0:
return None
return self.symbols[curr_idx]['symbols'][name]
def lookup_type(self, name):
curr_idx = len(self.symbols) - 1
while curr_idx >= 0 and (name not in self.symbols[curr_idx]['types']):
curr_idx -= 1
if curr_idx < 0:
return None
return self.symbols[curr_idx]['types'][name]
def __repr__(self):
s = '\n'.join(
' ' * idx * 2 + str(table) for idx, table in enumerate(self.symbols))
return s
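# Illustrative sketch of the scoping behaviour above, using only the type
# table so that no MLIR values are required:
#   table = SymbolTable()
#   table.insert_type('x', 'i32')
#   table.enter_scope()
#   table.insert_type('x', 'i1')            # shadows the outer 'x'
#   table.lookup_type('x')                  # -> 'i1'
#   table.exit_scope()
#   table.lookup_type('x')                  # -> 'i32'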
class ProcessType(ast.NodeVisitor):
"""Visit a node and return processed type Currently only visits annotations and gives their type.
"""
def __init__(self, prog, ctx):
self.prog = prog
self.ctx = ctx
def visit_Attribute(self, node):
# Supported: core.Tensor
value = self.visit(node.value)
if value is None or not hasattr(value, node.attr):
raise AttributeError(str(type(value)) + ' has no attribute ' + node.attr)
attr = getattr(value, node.attr)
if attr == core.Tensor:
return tfp.UnrankedTensorType.get(tfp.IntegerType.get(self.prog.ctx, 32))
return attr
def visit_Name(self, node):
if node.id == 'int':
return tfp.IntegerType.get(self.prog.ctx, 32)
if node.id == 'bool':
return tfp.IntegerType.get(self.prog.ctx, 1)
if node.id in self.ctx.info.namespace:
return self.ctx.info.namespace[node.id]
class MLIRGen(ast.NodeVisitor):
"""Visit the AST and generate MLIR code Requires liveness, reading_definitions.
"""
def __init__(self, ctx):
self.ctx = ctx
self.symbol_table = SymbolTable()
self.prog = tfp.TFProgram()
self.opbuilder = None
def visit_block(self, block):
return [self.visit(item) for item in block]
def process_type(self, node):
return ProcessType(self.prog, self.ctx).visit(node)
def visit_Assign(self, node):
value = self.visit(node.value)
if isinstance(value, tuple):
# If it is a tuple of values, assign one to each in targets
      # TODO: This currently assumes that all elts in targets[0] are Name
      # objects. This might not always be true.
for key, val in zip(node.targets[0].elts, value):
self.symbol_table.insert_symbol(key.id, val)
else:
self.symbol_table.insert_symbol(node.targets[0].id, value)
def visit_BinOp(self, node):
left = self.visit(node.left)
right = self.visit(node.right)
if isinstance(node.op, ast.Sub):
return tfp.Tf_SubOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(),
left, right).getResult(0)
if isinstance(node.op, ast.Add):
return tfp.Tf_AddV2Op.create(self.opbuilder,
self.opbuilder.getUnknownLoc(), left,
right).getResult(0)
def visit_BoolOp(self, node):
values = [self.visit(value) for value in node.values]
if isinstance(node.op, ast.Or):
return tfp.OrOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(),
values).getResult(0)
if isinstance(node.op, ast.And):
return tfp.AndOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(),
values).getResult(0)
def visit_Call(self, node):
func = self.visit(node.func)
args = [self.visit(arg) for arg in node.args]
callop = tfp.Tf_LegacyCallOp.create(self.opbuilder,
self.opbuilder.getUnknownLoc(),
func.getType().getResults(), args,
func.getName())
if callop.getNumResults() == 1:
return callop[0]
return tuple(callop.getResult(idx) for idx in range(callop.getNumResults()))
def visit_Compare(self, node):
left = self.visit(node.left)
opb = self.opbuilder
for op, right in zip(node.ops, node.comparators):
if isinstance(op, ast.Eq):
left = tfp.Tf_EqualOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
elif isinstance(op, ast.Lt):
left = tfp.Tf_LessOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
elif isinstance(op, ast.LtE):
left = tfp.Tf_LessEqualOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
elif isinstance(op, ast.Gt):
left = tfp.Tf_GreaterOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
elif isinstance(op, ast.GtE):
left = tfp.Tf_GreaterEqualOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
elif isinstance(op, ast.NotEq):
left = tfp.Tf_NotEqualOp.create(opb, opb.getUnknownLoc(), left,
self.visit(right)).getResult(0)
else:
raise NotImplementedError('CompareOp operator not recognized')
return left
def visit_Constant(self, node):
opb = self.opbuilder
value = None
if isinstance(node.value, int):
value = tfp.Tf_ConstOp.create(
opb, opb.getUnknownLoc(),
tfp.IntegerAttr.get(
tfp.IntegerType.get(self.prog.ctx, 32), node.value)).getResult(0)
return value
def visit_FunctionDef(self, node):
# Cache the current builder
cache_builder = self.opbuilder
inputs, outputs = [], []
for arg in node.args.args:
inputs.append(self.process_type(arg.annotation))
if node.returns:
outputs = [self.process_type(node.returns)]
currfunc = self.prog.add_function(
self.ctx.namer.new_symbol(node.name, []),
self.prog.get_function_type(inputs, outputs))
# Add the function to symbol table and enter new scope
self.symbol_table.insert_symbol(node.name, currfunc)
self.symbol_table.enter_scope()
# Add arguments to symbol table
for arg, value in zip(node.args.args, currfunc.getArguments()):
self.symbol_table.insert_symbol(arg.id, value)
self.opbuilder = tfp.OpBuilder(currfunc.getBody())
self.visit_block(node.body)
self.symbol_table.exit_scope()
self.opbuilder = cache_builder
def visit_If(self, node):
cond = self.visit(node.test)
# Create ifop
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
modified_in_cond = list(body_scope.modified | orelse_scope.modified)
outputs = [
self.symbol_table.lookup_type(str(var)) for var in modified_in_cond
]
ifop = tfp.IfOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(), cond,
outputs)
# Cache the builder
cache_builder = self.opbuilder
# Visit body
self.opbuilder = tfp.OpBuilder(ifop.getRegion(0))
# Enter scope to avoid values generated inside the region to come in symbol
# table
self.symbol_table.enter_scope()
for stmt in node.body:
self.visit(stmt)
retvals = [
self.symbol_table.lookup(str(varname)) for varname in modified_in_cond
]
tfp.ReturnOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(), retvals)
self.symbol_table.exit_scope()
# Visit orelse
self.opbuilder = tfp.OpBuilder(ifop.getRegion(1))
self.symbol_table.enter_scope()
for stmt in node.orelse:
self.visit(stmt)
retvals = [
self.symbol_table.lookup(str(varname)) for varname in modified_in_cond
]
tfp.ReturnOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(), retvals)
self.symbol_table.exit_scope()
# Reset builder and enter return values in symbol table
self.opbuilder = cache_builder
for idx, var in enumerate(modified_in_cond):
self.symbol_table.insert_symbol(str(var), ifop.getResult(idx))
if ifop.getNumResults() == 1:
return ifop.getResult(0)
return tuple(ifop.getResult(i) for i in range(ifop.getNumResults()))
def visit_Name(self, node):
if self.symbol_table.lookup(node.id):
return self.symbol_table.lookup(node.id)
    raise NotImplementedError('Symbol not found: ' + node.id)
def visit_Return(self, node):
opb = self.opbuilder
value = self.visit(node.value)
if isinstance(value, tuple):
# For more than one return values
return tfp.ReturnOp.create(opb, opb.getUnknownLoc(), list(value))
return tfp.ReturnOp.create(opb, opb.getUnknownLoc(), [value])
def visit_Tuple(self, node):
return tuple(self.visit(elt) for elt in node.elts)
def visit_UnaryOp(self, node):
operand = self.visit(node.operand)
if isinstance(node.op, ast.USub):
return tfp.Tf_NegOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(),
operand).getResult(0)
def _get_basic_loop_vars(self, modified, live_in, live_out):
# [This is directly from
# tensorflow/python/autograph/converters/control_flow.py]
# The loop variables corresponding to simple symbols (e.g. `x`).
basic_loop_vars = []
for s in modified:
if s.is_composite():
# TODO: Raise an error when this happens for a TF loop.
continue
# Variables not live into or out of the loop are considered local to the
# loop.
if s not in live_in and s not in live_out:
continue
basic_loop_vars.append(s)
return frozenset(basic_loop_vars)
def _get_composite_loop_vars(self, modified, live_in):
# [This is directly from
# tensorflow/python/autograph/converters/control_flow.py]
# The loop variables corresponding to composite symbols (e.g. `self.x`).
composite_loop_vars = []
for s in modified:
if not s.is_composite():
continue
# Mutations made to objects created inside the loop will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the loop.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the loop, but x is not.
#
# Note that some parents might not be symbols - for example, in x['foo'],
# 'foo' is a parent, but it's a literal, not a symbol. We don't check the
# liveness of literals.
support_set_symbols = tuple(
sss for sss in s.support_set if sss.is_symbol())
if not all(sss in live_in for sss in support_set_symbols):
continue
composite_loop_vars.append(s)
return frozenset(composite_loop_vars)
def _get_loop_vars(self, node, modified):
# [This is directly from python/autograph/converters/control_flow.py]
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
reserved_symbols = body_scope.referenced
basic_loop_vars = self._get_basic_loop_vars(modified, live_in, live_out)
composite_loop_vars = self._get_composite_loop_vars(modified, live_in)
loop_vars = tuple(basic_loop_vars | composite_loop_vars)
# Variable that are used or defined inside the loop, but not defined
# before entering the loop. Only simple variables must be defined. The
# composite ones will be implicitly checked at runtime.
undefined_lives = basic_loop_vars - defined_in
return loop_vars, reserved_symbols, undefined_lives
def visit_While(self, node):
# Create a new WhileOp
# `inputs` are initial values for loop variables
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
loop_vars, _, _ = self._get_loop_vars(node, body_scope.modified)
inputs = [self.symbol_table.lookup(str(name)) for name in loop_vars]
types = [input_.getType() for input_ in inputs]
while_op = tfp.WhileOp.create(self.opbuilder,
self.opbuilder.getUnknownLoc(), inputs, types)
# cache the current builder
cache_builder = self.opbuilder
# Process cond
self.symbol_table.enter_scope()
for input_, type_ in zip(loop_vars, types):
self.symbol_table.insert_symbol(
str(input_),
while_op.getRegion(0).front().addArgument(type_))
self.opbuilder = tfp.OpBuilder(while_op.getRegion(0))
tfp.ReturnOp.create(self.opbuilder, self.opbuilder.getUnknownLoc(),
[self.visit(node.test)])
self.symbol_table.exit_scope()
# Process body
self.symbol_table.enter_scope()
for input_, type_ in zip(loop_vars, types):
self.symbol_table.insert_symbol(
str(input_),
while_op.getRegion(1).front().addArgument(type_))
self.opbuilder = tfp.OpBuilder(while_op.getRegion(1))
self.visit_block(node.body)
tfp.ReturnOp.create(
self.opbuilder, self.opbuilder.getUnknownLoc(),
[self.symbol_table.lookup(str(name)) for name in loop_vars])
self.symbol_table.exit_scope()
# Enter new values as symbols
for idx, var in enumerate(loop_vars):
self.symbol_table.insert_symbol(str(var), while_op.getResult(idx))
# Restore builder
self.opbuilder = cache_builder
def mlir_gen_internal(node, entity_info):
"""Returns mlir module for unprocessed node `node`."""
namer = naming.Namer({})
graphs = cfg.build(node)
ctx = transformer.Context(entity_info, namer, None)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx)
node = reaching_definitions.resolve(node, ctx, graphs)
node = reaching_fndefs.resolve(node, ctx, graphs)
node = liveness.resolve(node, ctx, graphs)
mlir_generator = MLIRGen(ctx)
mlir_generator.visit(node)
return mlir_generator.prog
def mlir_gen(func):
"""Parse a function and return TFProgram."""
node, source = parser.parse_entity(func, future_features=())
entity_info = transformer.EntityInfo(
name=func.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace=inspect_utils.getnamespace(func))
return mlir_gen_internal(node, entity_info)
def mlir_gen_from_source(source=None, src_file=None):
"""Parse a function as either a string or from a supplied file path and return a TFProgram.
"""
if source is None:
source = open(src_file).read()
node = ast.parse(source)
entity_info = transformer.EntityInfo(
name='mlir_module',
source_code=source,
source_file=None,
future_features=(),
namespace={})
return mlir_gen_internal(node, entity_info)
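# Illustrative usage sketch (assumes the pywrap_tfd extension is built): run
# the generator on a small annotated function; the function name below is a
# hypothetical example, not part of the original module.
if __name__ == '__main__':
  def _triangular_sum(n: int) -> int:
    result = 0
    while n > 0:
      result = result + n
      n = n - 1
    return result
  # mlir_gen parses, analyzes and visits the function, returning a TFProgram.
  _program = mlir_gen(_triangular_sum)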
|
apache-2.0
|
dgarnier/pyms
|
Utils/__init__.py
|
7
|
1571
|
"""
Utility functions for PyMS wide use
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
|
gpl-2.0
|
Salat-Cx65/python-for-android
|
python3-alpha/python3-src/Lib/test/test_tempfile.py
|
46
|
35200
|
# tempfile.py unit tests.
import tempfile
import os
import sys
import re
import warnings
import unittest
from test import support
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
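    # Illustrative aside (not part of the upstream test): for a name such as
    # '/tmp/aaQ3fX9r.txt' created with dir='/tmp', pre='aa' and suf='.txt',
    # nameCheck() verifies the directory, the 'aa' prefix, the '.txt' suffix,
    # and that the six characters in between ('Q3fX9r') match
    # /^[a-zA-Z0-9_-]{6}$/.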
test_classes = []
class test_exports(TC):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
    def test_supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
try:
for s in r:
i += 1
if i == 20:
break
except:
self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if not has_spawnl:
return # ugh, can't use SkipTest.
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
# _mkstemp_inner can create files in text mode
if not has_textmode:
return # ugh, can't use SkipTest.
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
super().setUp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
super().tearDown()
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file.  This will raise an exception if the file has
            # mysteriously appeared in the meantime.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("SpooledTemporaryFile")
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
        # Writing at an offset beyond max_size (after a sparse seek) extends
        # the file and forces it to roll over
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
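# Hedged illustration (not part of the upstream suite): the rollover behaviour
# exercised above can be shown directly.  A SpooledTemporaryFile buffers data
# in memory until it holds more than max_size bytes, then transparently rolls
# over to a real on-disk temporary file.
def _spooled_rollover_demo():
    f = tempfile.SpooledTemporaryFile(max_size=4)
    f.write(b"abc")              # 3 bytes <= max_size: still in memory
    assert not f._rolled
    f.write(b"de")               # now 5 bytes > max_size: rolled over to disk
    assert f._rolled
    f.close()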
class test_TemporaryFile(TC):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
try:
tempfile.TemporaryFile()
except:
self.failOnException("TemporaryFile")
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
ei = sys.exc_info()
# cleanup
f.close()
os.rmdir(dir)
self.failOnException("rmdir", ei)
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
# Helper for test_del_on_shutdown
class NulledModules:
def __init__(self, *modules):
self.refs = [mod.__dict__ for mod in modules]
self.contents = [ref.copy() for ref in self.refs]
def __enter__(self):
for d in self.refs:
for key in d:
d[key] = None
def __exit__(self, *exc_info):
for d, c in zip(self.refs, self.contents):
d.clear()
d.update(c)
class test_TemporaryDirectory(TC):
"""Test TemporaryDirectory()."""
def do_create(self, dir=None, pre="", suf="", recurse=1):
if dir is None:
dir = tempfile.gettempdir()
try:
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("TemporaryDirectory")
self.nameCheck(tmp.name, dir, pre, suf)
# Create a subdirectory and some files
if recurse:
self.do_create(tmp.name, pre, suf, recurse-1)
with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
f.write(b"Hello world!")
return tmp
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
# (noted as part of Issue #10188)
with tempfile.TemporaryDirectory() as nonexistent:
pass
with self.assertRaises(os.error):
tempfile.TemporaryDirectory(dir=nonexistent)
def test_explicit_cleanup(self):
# A TemporaryDirectory is deleted when cleaned up
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
self.assertTrue(os.path.exists(d.name),
"TemporaryDirectory %s does not exist" % d.name)
d.cleanup()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
finally:
os.rmdir(dir)
@support.skip_unless_symlink
def test_cleanup_with_symlink_to_a_directory(self):
# cleanup() should not follow symlinks to directories (issue #12464)
d1 = self.do_create()
d2 = self.do_create()
# Symlink d1/foo -> d2
os.symlink(d2.name, os.path.join(d1.name, "foo"))
# This call to cleanup() should not follow the "foo" symlink
d1.cleanup()
self.assertFalse(os.path.exists(d1.name),
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
self.assertEqual(os.listdir(d2.name), ['test.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
name = d.name
del d # Rely on refcounting to invoke __del__
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
finally:
os.rmdir(dir)
@unittest.expectedFailure # See issue #10188
def test_del_on_shutdown(self):
# A TemporaryDirectory may be cleaned up during shutdown
# Make sure it works with the relevant modules nulled out
with self.do_create() as dir:
d = self.do_create(dir=dir)
# Mimic the nulling out of modules that
# occurs during system shutdown
modules = [os, os.path]
if has_stat:
modules.append(stat)
# Currently broken, so suppress the warning
# that is otherwise emitted on stdout
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
# Currently broken, so stop spurious exception by
# indicating the object has already been closed
d._closed = True
# And this assert will fail, as expected by the
# unittest decorator...
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
def test_warnings_on_cleanup(self):
# Two kinds of warning on shutdown
# Issue 10888: may write to stderr if modules are nulled out
# ResourceWarning will be triggered by __del__
with self.do_create() as dir:
if os.sep != '\\':
# Embed a backslash in order to make sure string escaping
# in the displayed error message is dealt with correctly
suffix = '\\check_backslash_handling'
else:
suffix = ''
d = self.do_create(dir=dir, suf=suffix)
#Check for the Issue 10888 message
modules = [os, os.path]
if has_stat:
modules.append(stat)
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
message = err.getvalue().replace('\\\\', '\\')
self.assertIn("while cleaning up", message)
self.assertIn(d.name, message)
# Check for the resource warning
with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
warnings.filterwarnings("always", category=ResourceWarning)
d.__del__()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after __del__" % d.name)
def test_multiple_close(self):
        # Can be cleaned up many times without error
d = self.do_create()
d.cleanup()
try:
d.cleanup()
d.cleanup()
except:
self.failOnException("cleanup")
def test_context_manager(self):
# Can be used as a context manager
d = self.do_create()
with d as name:
self.assertTrue(os.path.exists(name))
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
test_classes.append(test_TemporaryDirectory)
def test_main():
support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
guewen/odoo
|
addons/l10n_gt/__init__.py
|
411
|
1113
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jenalgit/django
|
tests/foreign_object/models.py
|
73
|
5770
|
import datetime
from django.db import models
from django.db.models.fields.related import \
ReverseSingleRelatedObjectDescriptor
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import get_language
@python_2_unicode_compatible
class Country(models.Model):
# Table Column Fields
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Person(models.Model):
# Table Column Fields
name = models.CharField(max_length=128)
person_country_id = models.IntegerField()
# Relation Fields
person_country = models.ForeignObject(
Country, from_fields=['person_country_id'], to_fields=['id'])
friends = models.ManyToManyField('self', through='Friendship', symmetrical=False)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
# Table Column Fields
name = models.CharField(max_length=128)
group_country = models.ForeignKey(Country)
members = models.ManyToManyField(Person, related_name='groups', through='Membership')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Membership(models.Model):
# Table Column Fields
membership_country = models.ForeignKey(Country)
date_joined = models.DateTimeField(default=datetime.datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
person_id = models.IntegerField()
group_id = models.IntegerField()
# Relation Fields
person = models.ForeignObject(
Person,
from_fields=['membership_country', 'person_id'],
to_fields=['person_country_id', 'id'])
group = models.ForeignObject(
Group,
from_fields=['membership_country', 'group_id'],
to_fields=['group_country', 'id'])
class Meta:
ordering = ('date_joined', 'invite_reason')
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class Friendship(models.Model):
# Table Column Fields
from_friend_country = models.ForeignKey(Country, related_name="from_friend_country")
from_friend_id = models.IntegerField()
to_friend_country_id = models.IntegerField()
to_friend_id = models.IntegerField()
# Relation Fields
from_friend = models.ForeignObject(
Person,
from_fields=['from_friend_country', 'from_friend_id'],
to_fields=['person_country_id', 'id'],
related_name='from_friend')
to_friend_country = models.ForeignObject(
Country,
from_fields=['to_friend_country_id'],
to_fields=['id'],
related_name='to_friend_country')
to_friend = models.ForeignObject(
Person,
from_fields=['to_friend_country_id', 'to_friend_id'],
to_fields=['person_country_id', 'id'],
related_name='to_friend')
class ArticleTranslationDescriptor(ReverseSingleRelatedObjectDescriptor):
"""
    Setting an article translation on an instance must not set any local fields.
"""
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.field.name)
setattr(instance, self.cache_name, value)
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.related.get_cache_name(), instance)
class ColConstraint(object):
# Anything with as_sql() method works in get_extra_restriction().
def __init__(self, alias, col, value):
self.alias, self.col, self.value = alias, col, value
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return '%s.%s = %%s' % (qn(self.alias), qn(self.col)), [self.value]
class ActiveTranslationField(models.ForeignObject):
"""
This field will allow querying and fetching the currently active translation
for Article from ArticleTranslation.
"""
requires_unique_target = False
def get_extra_restriction(self, where_class, alias, related_alias):
return ColConstraint(alias, 'lang', get_language())
def get_extra_descriptor_filter(self):
return {'lang': get_language()}
def contribute_to_class(self, cls, name):
super(ActiveTranslationField, self).contribute_to_class(cls, name)
setattr(cls, self.name, ArticleTranslationDescriptor(self))
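# Hedged usage sketch (illustrative only, not part of the upstream tests):
# with the Article model below, the field supports both attribute access and
# ORM lookups that join on the currently active language, e.g.
#
#   Article.objects.filter(active_translation__title='Sunflowers')
#   article.active_translation.title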
@python_2_unicode_compatible
class Article(models.Model):
active_translation = ActiveTranslationField(
'ArticleTranslation',
from_fields=['id'],
to_fields=['article'],
related_name='+',
null=True)
pub_date = models.DateField()
def __str__(self):
try:
return self.active_translation.title
except ArticleTranslation.DoesNotExist:
return '[No translation found]'
class NewsArticle(Article):
pass
class ArticleTranslation(models.Model):
article = models.ForeignKey(Article)
lang = models.CharField(max_length=2)
title = models.CharField(max_length=100)
body = models.TextField()
abstract = models.CharField(max_length=400, null=True)
class Meta:
unique_together = ('article', 'lang')
ordering = ('active_translation__title',)
class ArticleTag(models.Model):
article = models.ForeignKey(Article, related_name="tags", related_query_name="tag")
name = models.CharField(max_length=255)
class ArticleIdea(models.Model):
articles = models.ManyToManyField(Article, related_name="ideas",
related_query_name="idea_things")
name = models.CharField(max_length=255)
|
bsd-3-clause
|
Lekanich/intellij-community
|
python/helpers/docutils/parsers/rst/languages/fr.py
|
57
|
3577
|
# $Id: fr.py 4564 2006-05-21 20:44:42Z wiemann $
# Authors: David Goodger <[email protected]>; William Dode
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'attention': 'attention',
u'pr\u00E9caution': 'caution',
u'danger': 'danger',
u'erreur': 'error',
u'conseil': 'hint',
u'important': 'important',
u'note': 'note',
u'astuce': 'tip',
u'avertissement': 'warning',
u'admonition': 'admonition',
u'encadr\u00E9': 'sidebar',
u'sujet': 'topic',
u'bloc-textuel': 'line-block',
u'bloc-interpr\u00E9t\u00E9': 'parsed-literal',
u'code-interpr\u00E9t\u00E9': 'parsed-literal',
u'intertitre': 'rubric',
u'exergue': 'epigraph',
u'\u00E9pigraphe': 'epigraph',
u'chapeau': 'highlights',
u'accroche': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#u'questions': 'questions',
#u'qr': 'questions',
#u'faq': 'questions',
u'tableau': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'm\u00E9ta': 'meta',
#u'imagemap (translation required)': 'imagemap',
u'image': 'image',
u'figure': 'figure',
u'inclure': 'include',
u'brut': 'raw',
u'remplacer': 'replace',
u'remplace': 'replace',
u'unicode': 'unicode',
u'date': 'date',
u'classe': 'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'titre (translation required)': 'title',
u'sommaire': 'contents',
u'table-des-mati\u00E8res': 'contents',
u'sectnum': 'sectnum',
u'section-num\u00E9rot\u00E9e': 'sectnum',
u'liens': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
}
"""French name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abr\u00E9viation': 'abbreviation',
u'acronyme': 'acronym',
u'sigle': 'acronym',
u'index': 'index',
u'indice': 'subscript',
u'ind': 'subscript',
u'exposant': 'superscript',
u'exp': 'superscript',
u'titre-r\u00E9f\u00E9rence': 'title-reference',
u'titre': 'title-reference',
u'pep-r\u00E9f\u00E9rence': 'pep-reference',
u'rfc-r\u00E9f\u00E9rence': 'rfc-reference',
u'emphase': 'emphasis',
u'fort': 'strong',
u'litt\u00E9ral': 'literal',
u'nomm\u00E9e-r\u00E9f\u00E9rence': 'named-reference',
u'anonyme-r\u00E9f\u00E9rence': 'anonymous-reference',
u'note-r\u00E9f\u00E9rence': 'footnote-reference',
u'citation-r\u00E9f\u00E9rence': 'citation-reference',
u'substitution-r\u00E9f\u00E9rence': 'substitution-reference',
u'lien': 'target',
u'uri-r\u00E9f\u00E9rence': 'uri-reference',
u'brut': 'raw',}
"""Mapping of French role names to canonical role names for interpreted text.
"""
|
apache-2.0
|
ddrmanxbxfr/servo
|
tests/wpt/css-tests/tools/manifest/utils.py
|
115
|
1374
|
import platform
import os
from six import BytesIO
def rel_path_to_url(rel_path, url_base="/"):
assert not os.path.isabs(rel_path)
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
return url_base + rel_path.replace(os.sep, "/")
def from_os_path(path):
assert os.path.sep == "/" or platform.system() == "Windows"
rv = path.replace(os.path.sep, "/")
if "\\" in rv:
raise ValueError("path contains \\ when separator is %s" % os.path.sep)
return rv
def to_os_path(path):
assert os.path.sep == "/" or platform.system() == "Windows"
if "\\" in path:
raise ValueError("normalised path contains \\")
return path.replace("/", os.path.sep)
class ContextManagerBytesIO(BytesIO):
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name not in obj.__dict__:
obj.__dict__[self.name] = self.func(obj)
obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
return obj.__dict__[self.name]
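# Hedged usage sketch (illustrative only, not part of the upstream module):
if __name__ == "__main__":
    # Path helpers: OS-specific relative path -> manifest URL.
    print(rel_path_to_url(from_os_path(os.path.join("css", "test.html"))))  # /css/test.html
    # cached_property computes once per instance and caches in __dict__.
    class _Example(object):
        @cached_property
        def answer(self):
            print("computing")   # printed only on the first access
            return 42
    e = _Example()
    print(e.answer)              # "computing" then 42
    print(e.answer)              # cached: just 42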
|
mpl-2.0
|
atruberg/django-custom
|
setup.py
|
48
|
4563
|
import os
import sys
from distutils.core import setup
from distutils.sysconfig import get_python_lib
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join)
in a platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
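# Illustrative example (not in the upstream file):
# fullsplit(os.path.join('django', 'conf', 'locale')) returns
# ['django', 'conf', 'locale'] -- the inverse of os.path.join, independent of
# the platform's path separator.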
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
def is_package(package_name):
for pkg in EXCLUDE_FROM_PACKAGES:
if package_name.startswith(pkg):
return False
return True
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, package_data = [], {}
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
django_dir = 'django'
for dirpath, dirnames, filenames in os.walk(django_dir):
# Ignore PEP 3147 cache dirs and those whose names start with '.'
dirnames[:] = [d for d in dirnames if not d.startswith('.') and d != '__pycache__']
parts = fullsplit(dirpath)
package_name = '.'.join(parts)
if '__init__.py' in filenames and is_package(package_name):
packages.append(package_name)
elif filenames:
relative_path = []
while '.'.join(parts) not in packages:
relative_path.append(parts.pop())
relative_path.reverse()
path = os.path.join(*relative_path)
package_files = package_data.setdefault('.'.join(parts), [])
package_files.extend([os.path.join(path, f) for f in filenames])
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name='Django',
version=version,
url='http://www.djangoproject.com/',
author='Django Software Foundation',
author_email='[email protected]',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
license='BSD',
packages=packages,
package_data=package_data,
scripts=['django/bin/django-admin.py'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
|
bsd-3-clause
|
ecular/qemu_seamless
|
scripts/tracetool/backend/simple.py
|
57
|
3099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def is_string(arg):
strtype = ('const char*', 'char*', 'const char *', 'char *')
if arg.lstrip().startswith(strtype):
return True
else:
return False
def c(events):
out('#include "trace.h"',
'#include "trace/simple.h"',
'',
'TraceEvent trace_list[] = {')
for e in events:
out('{.tp_name = "%(name)s", .state=0},',
name = e.name,
)
out('};',
'')
for num, event in enumerate(events):
out('void trace_%(name)s(%(args)s)',
'{',
' TraceBufferRecord rec;',
name = event.name,
args = event.args,
)
sizes = []
for type_, name in event.args:
if is_string(type_):
out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
name = name,
)
strsizeinfo = "4 + arg%s_len" % name
sizes.append(strsizeinfo)
else:
sizes.append("8")
sizestr = " + ".join(sizes)
if len(event.args) == 0:
sizestr = '0'
out('',
' if (!trace_list[%(event_id)s].state) {',
' return;',
' }',
'',
' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {',
' return; /* Trace Buffer Full, Event Dropped ! */',
' }',
event_id = num,
size_str = sizestr,
)
if len(event.args) > 0:
for type_, name in event.args:
# string
if is_string(type_):
out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
name = name,
)
# pointer var (not string)
elif type_.endswith('*'):
out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
name = name,
)
# primitive data type
else:
out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
name = name,
)
out(' trace_record_finish(&rec);',
'}',
'')
def h(events):
out('#include "trace/simple.h"',
'')
for event in events:
out('void trace_%(name)s(%(args)s);',
name = event.name,
args = event.args,
)
out('')
out('#define NR_TRACE_EVENTS %d' % len(events))
out('extern TraceEvent trace_list[NR_TRACE_EVENTS];')
|
gpl-2.0
|
JTCCOIN/jtccoin
|
share/qt/make_spinner.py
|
4415
|
1035
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
mit
|
jopham/gothub
|
web/main.py
|
2
|
5461
|
import web
import logging
#import model
#import markdown
import pymongo
from pymongo.code import Code
import re
from pymongo.objectid import ObjectId
import json
from datetime import datetime
from sets import Set
from collections import defaultdict
logging.basicConfig(level=logging.INFO)
conn = pymongo.Connection()
db = conn.processed
urls = (
'/', 'Index',
'/stats', 'Stats',
'/test', 'Test',
'/query', 'Query',
'/projects', 'Projects'
)
render = web.template.render('templates/')
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return "new Date('%s')" % obj.ctime()
elif isinstance(obj, ObjectId):
return str(obj)
else:
return json.JSONEncoder.default(self, obj)
class Index:
def GET(self):
raise web.seeother('/static/map.html?presentationMode=1')
class Test:
def GET(self):
params = web.input()
if params.has_key('name'):
return "<h1>" + params.name + "</h1>"
else:
return "<h1> no name </h1>"
class Projects:
def GET(self):
map = Code("""
function() {
emit(this.project, 1);
}
""");
reduce = Code("""
function(key, values) {
var total = 0;
values.forEach(function(v) {
total += v
});
return total;
}
""");
pattern = re.compile('^' + web.input().term + '.*')
result = db.commits.map_reduce(map, reduce, query={'project': pattern})
results = []
for item in sorted(result.find(), key=lambda item: -item['value'])[:10]:
results.append({'label': item['_id'], 'value': item['_id'], 'count': item['value']})
return json.dumps(results)
class Query:
def GET(self):
params = web.input()
query = {}
linkAggregation = False
linkMaxScale = False
#logging.info(params.keys())
results = {'links': [], 'locations': [], 'stats': {}}
if params.has_key('project'):
query['project'] = params.project
if params.has_key('lat_min') and params.has_key('lat_max'):
query['lat'] = {"$gt" : float(params.lat_min), "$lt" : float(params.lat_max)}
if params.has_key('long_max') and params.has_key('long_min'):
query['long'] = {"$gt" : float(params.long_min), "$lt" : float(params.long_max)}
if params.has_key('date_start') and params.has_key('date_end'):
date_start = params.date_start.split('/')
date_end = params.date_end.split('/')
# expected web input: MM/DD/YYYY
# datetime input: year, month, day
query['date'] = {"$gt" : datetime(int(date_start[2]), int(date_start[0]), int(date_start[1])),
"$lt" : datetime(int(date_end[2]), int(date_end[0]), int(date_end[1])) }
if params.has_key('linkAggregation'):
if params['linkAggregation'] == "True":
linkAggregation = True
if params.has_key('linkMaxScale'):
linkMaxScale = params['linkMaxScale']
logging.info(query)
cursor = db.commits.find(query)
if params.has_key('sort') and params.sort == "1":
cursor = cursor.sort("date", 1)
seen = set()
locations = {}
commit_count = 0
author_count = 0
link_count = 0
links = defaultdict(int)
for c in cursor:
if (not c['sha1'] in seen) and c.has_key('lat') and c.has_key('long'):
commit_count += 1
geo = (c['lat'], c['long'])
author = c['author']
location = ''
if 'location' in c:
location = c['location']
else:
location = None
if geo not in locations:
locations[geo] = {'_locations': []}
if author not in locations[geo]:
if location: locations[geo]['_locations'].append(location)
locations[geo][author] = True
author_count += 1
for p_sha1 in c['parents']:
p = db.commits.find_one({'sha1': p_sha1})
if p and p.has_key('lat') and p.has_key('long') and (p['lat'] != c['lat']) and (p['long'] != c['long']) :
if linkAggregation:
#results['links'].append([c, p])
key = ((c['long'], c['lat']),(p['long'], p['lat']), c['date'])
links[key] = links[key] + 1
else:
results['links'].append(((c['long'], c['lat']),(p['long'], p['lat']), c['date']))
link_count += 1
seen.add(c['sha1'])
for key, val in locations.iteritems():
hash = {'lat': key[0], 'long': key[1]}
hash['loc_count'] = max(len(val) - 1, 0)
hash['authors'] = [e for e in val.keys() if e != '_locations']
hash['locations'] = [loc for loc in Set(([e for e in val['_locations']]))]
results['locations'].append(hash)
results['stats']['commit_count'] = commit_count
results['stats']['author_count'] = author_count
results['stats']['link_count'] = link_count
if linkAggregation:
results['links'] = links.items()
return "jsonpcallback("+json.dumps(results, cls=DateEncoder)+")"
class Stats:
def GET(self):
# db -> collections
dbs = {
'raw' : ['commits', 'repos', 'users'],
'queue' : ['commits', 'repos', 'users'],
'processed' : ['commits']
}
return render.stats(conn,dbs)
app = web.application(urls, globals())
if __name__ == '__main__':
app.run()
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
mit
|
mgyenik/micropython
|
tests/misc/features.py
|
87
|
1952
|
# mad.py
# Alf Clement 27-Mar-2014
#
zero=0
three=3
print("1")
print("2")
print(three)
print("{}".format(4))
five=25//5
print(int(five))
j=0
for i in range(4):
j += i
print(j)
print(3+4)
try:
a=4//zero
except:
print(8)
print("xxxxxxxxx".count("x"))
def ten():
return 10
print(ten())
a=[]
for i in range(13):
a.append(i)
print(a[11])
print(a[-1])
str="0123456789"
print(str[1]+str[3])
def p(s):
print(s)
p("14")
p(15)
class A:
def __init__(self):
self.a=16
def print(self):
print(self.a)
def set(self,b):
self.a=b
a=A()
a.print()
a.set(17)
a.print()
b=A()
b.set(a.a + 1)
b.print()
for i in range(20):
pass
print(i)
if 20 > 30:
a="1"
else:
a="2"
if 0 < 4:
print(a+"0")
else:
print(a+"1")
a=[20,21,22,23,24]
for i in a:
if i < 21:
continue
if i > 21:
break
print(i)
b=[a,a,a]
print(b[1][2])
print(161//7)
a=24
while True:
try:
def gcheck():
global a
print(a)
gcheck()
class c25():
x=25
x=c25()
print(x.x)
raise
except:
print(26)
print(27+zero)
break
print(28)
k=29
def f():
global k
k = yield k
print(next(f()))
while True:
k+= 1
if k < 30:
continue
break
print(k)
for i in [1,2,3]:
class A():
def __init__(self, c):
self.a = i+10*c
b = A(3)
print(b.a)
print(34)
p=0
for i in range(35, -1, -1):
print(i)
p = p + 1
if p > 0:
break
p=36
while p == 36:
print(p)
p=37
print(p)
for i in [38]:
print(i)
print(int(exec("def foo(): return 38") == None)+foo())
d = {}
exec("def bar(): return 40", d)
print(d["bar"]())
def fib2(n):
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a+b
return result
print(fib2(100)[-2]-14)
Answer={}
Answer["ForAll"]=42
print(Answer["ForAll"])
i = 43
def f(i=i):
print(i)
i = 44
f()
print(i)
while True:
try:
if None != True:
print(45)
break
else:
print(0)
except:
print(0)
print(46)
print(46+1)
def u(p):
if p > 3:
return 3*p
else:
return u(2*p)-3*u(p)
print(u(16))
def u49():
return 49
print(u49())
|
mit
|
modulexcite/letsencrypt
|
acme/acme/crypto_util.py
|
27
|
7056
|
"""Crypto utilities."""
import contextlib
import logging
import socket
import sys
from six.moves import range # pylint: disable=import-error,redefined-builtin
import OpenSSL
from acme import errors
logger = logging.getLogger(__name__)
# DVSNI certificate serving and probing is not affected by SSL
# vulnerabilities: prober needs to check certificate for expected
# contents anyway. Working SNI is the only thing that's necessary for
# the challenge and thus scoping down SSL/TLS method (version) would
# cause interoperability issues: TLSv1_METHOD is only compatible with
# TLSv1_METHOD, while SSLv23_METHOD is compatible with all other
# methods, including TLSv1_2_METHOD (read more at
# https://www.openssl.org/docs/ssl/SSLv23_method.html). _serve_sni
# should be changed to use "set_options" to disable SSLv2 and SSLv3,
# in case it's used for things other than probing/serving!
_DEFAULT_DVSNI_SSL_METHOD = OpenSSL.SSL.SSLv23_METHOD
def _serve_sni(certs, sock, reuseaddr=True, method=_DEFAULT_DVSNI_SSL_METHOD,
accept=None):
"""Start SNI-enabled server, that drops connection after handshake.
:param certs: Mapping from SNI name to ``(key, cert)`` `tuple`.
:param sock: Already bound socket.
:param bool reuseaddr: Should `socket.SO_REUSEADDR` be set?
:param method: See `OpenSSL.SSL.Context` for allowed values.
:param accept: Callable that doesn't take any arguments and
returns ``True`` if more connections should be served.
"""
def _pick_certificate(connection):
try:
key, cert = certs[connection.get_servername()]
except KeyError:
return
new_context = OpenSSL.SSL.Context(method)
new_context.use_privatekey(key)
new_context.use_certificate(cert)
connection.set_context(new_context)
if reuseaddr:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.listen(1) # TODO: add func arg?
while accept is None or accept():
server, addr = sock.accept()
logger.debug('Received connection from %s', addr)
with contextlib.closing(server):
context = OpenSSL.SSL.Context(method)
context.set_tlsext_servername_callback(_pick_certificate)
server_ssl = OpenSSL.SSL.Connection(context, server)
server_ssl.set_accept_state()
try:
server_ssl.do_handshake()
server_ssl.shutdown()
except OpenSSL.SSL.Error as error:
raise errors.Error(error)
def probe_sni(name, host, port=443, timeout=300,
method=_DEFAULT_DVSNI_SSL_METHOD, source_address=('0', 0)):
"""Probe SNI server for SSL certificate.
:param bytes name: Byte string to send as the server name in the
client hello message.
:param bytes host: Host to connect to.
:param int port: Port to connect to.
:param int timeout: Timeout in seconds.
:param method: See `OpenSSL.SSL.Context` for allowed values.
:param tuple source_address: Enables multi-path probing (selection
        of source interface). See `socket.create_connection` for more
info. Available only in Python 2.7+.
:raises acme.errors.Error: In case of any problems.
:returns: SSL certificate presented by the server.
:rtype: OpenSSL.crypto.X509
"""
context = OpenSSL.SSL.Context(method)
context.set_timeout(timeout)
socket_kwargs = {} if sys.version_info < (2, 7) else {
'source_address': source_address}
try:
# pylint: disable=star-args
sock = socket.create_connection((host, port), **socket_kwargs)
except socket.error as error:
raise errors.Error(error)
with contextlib.closing(sock) as client:
client_ssl = OpenSSL.SSL.Connection(context, client)
client_ssl.set_connect_state()
client_ssl.set_tlsext_host_name(name) # pyOpenSSL>=0.13
try:
client_ssl.do_handshake()
client_ssl.shutdown()
except OpenSSL.SSL.Error as error:
raise errors.Error(error)
return client_ssl.get_peer_certificate()
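def _probe_sni_example():
    """Hedged usage sketch (illustrative only, not upstream API): probe a
    host and return the common name of the certificate it presents for the
    given SNI name."""
    cert = probe_sni(name=b'example.com', host=b'example.com', timeout=10)
    return cert.get_subject().CN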
def _pyopenssl_cert_or_req_san(cert_or_req):
"""Get Subject Alternative Names from certificate or CSR using pyOpenSSL.
.. todo:: Implement directly in PyOpenSSL!
.. note:: Although this is `acme` internal API, it is used by
`letsencrypt`.
:param cert_or_req: Certificate or CSR.
:type cert_or_req: `OpenSSL.crypto.X509` or `OpenSSL.crypto.X509Req`.
:returns: A list of Subject Alternative Names.
:rtype: `list` of `unicode`
"""
# constants based on implementation of
# OpenSSL.crypto.X509Error._subjectAltNameString
parts_separator = ", "
part_separator = ":"
extension_short_name = b"subjectAltName"
if hasattr(cert_or_req, 'get_extensions'): # X509Req
extensions = cert_or_req.get_extensions()
else: # X509
extensions = [cert_or_req.get_extension(i)
for i in range(cert_or_req.get_extension_count())]
# pylint: disable=protected-access,no-member
label = OpenSSL.crypto.X509Extension._prefixes[OpenSSL.crypto._lib.GEN_DNS]
assert parts_separator not in label
prefix = label + part_separator
san_extensions = [
ext._subjectAltNameString().split(parts_separator)
for ext in extensions if ext.get_short_name() == extension_short_name]
# WARNING: this function assumes that no SAN can include
# parts_separator, hence the split!
return [part.split(part_separator)[1] for parts in san_extensions
for part in parts if part.startswith(prefix)]
def gen_ss_cert(key, domains, not_before=None,
validity=(7 * 24 * 60 * 60), force_san=True):
"""Generate new self-signed certificate.
:type domains: `list` of `unicode`
:param OpenSSL.crypto.PKey key:
:param bool force_san:
    If more than one domain is provided, all of the domains are put into the
    ``subjectAltName`` X.509 extension and the first domain is set as the
    subject CN. If only one domain is provided, no ``subjectAltName``
    extension is used unless `force_san` is ``True``.
"""
assert domains, "Must provide one or more hostnames for the cert."
cert = OpenSSL.crypto.X509()
cert.set_serial_number(1337)
cert.set_version(2)
extensions = [
OpenSSL.crypto.X509Extension(
b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
]
cert.get_subject().CN = domains[0]
# TODO: what to put into cert.get_subject()?
cert.set_issuer(cert.get_subject())
if force_san or len(domains) > 1:
extensions.append(OpenSSL.crypto.X509Extension(
b"subjectAltName",
critical=False,
value=b", ".join(b"DNS:" + d.encode() for d in domains)
))
cert.add_extensions(extensions)
cert.gmtime_adj_notBefore(0 if not_before is None else not_before)
cert.gmtime_adj_notAfter(validity)
cert.set_pubkey(key)
cert.sign(key, "sha256")
return cert
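# Illustrative usage sketch (not part of the original module): generate a
# throwaway RSA key, issue a self-signed certificate for two hypothetical
# names, and read the names back out of its subjectAltName extension.
def _gen_ss_cert_example():
    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
    cert = gen_ss_cert(key, [u'example.com', u'www.example.com'])
    # expected: [u'example.com', u'www.example.com']
    return _pyopenssl_cert_or_req_san(cert)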
|
apache-2.0
|
CiscoSystems/neutron
|
neutron/tests/unit/testlib_plugin.py
|
8
|
2641
|
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import weakref
import mock
from oslo.config import cfg
from neutron.db import agentschedulers_db
from neutron import manager
from neutron.tests import base
from neutron.tests import fake_notifier
class PluginSetupHelper(object):
"""Mixin for use with testtools.TestCase."""
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
base.bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
self.fail('The plugin for this test was not deallocated.')
def setup_coreplugin(self, core_plugin=None):
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if core_plugin is not None:
cfg.CONF.set_override('core_plugin', core_plugin)
class NotificationSetupHelper(object):
"""Mixin for use with testtools.TestCase."""
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
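# Illustrative sketch (not part of the original module): a hypothetical test
# case wiring both mixins together. The core plugin path used here is an
# assumption for demonstration purposes only.
class _ExamplePluginTestCase(base.BaseTestCase,
                             PluginSetupHelper,
                             NotificationSetupHelper):
    def setUp(self):
        super(_ExamplePluginTestCase, self).setUp()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        self.setup_notification_driver()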
|
apache-2.0
|
vnsofthe/odoo-dev
|
openerp/addons/base/tests/test_orm.py
|
149
|
18110
|
from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
        TODO: use real Exception types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
        # /!\ Using an unprivileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
        self.ir_rule.create(cr, uid, {'name': 'W is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_exists(self):
partner = self.partner.browse(self.cr, UID, [])
# check that records obtained from search exist
recs = partner.search([])
self.assertTrue(recs)
self.assertEqual(recs.exists(), recs)
# check that there is no record with id 0
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_default(self):
""" `default_get` cannot return a dictionary or a new id """
defaults = self.user.default_get(self.cr, UID, ['partner_id'])
if 'partner_id' in defaults:
self.assertIsInstance(defaults['partner_id'], (bool, int, long))
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {
'name': 'Foo',
'login': 'foo',
'password': 'foo',
'supplier': True,
})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {
'login': 'bar',
'password': 'bar',
})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.name, 'Foo (copy)')
self.assertEqual(bar.login, 'bar')
self.assertEqual(foo.supplier, bar.supplier)
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
    def test_copy_with_ancestor(self):
        """ copying a user with 'partner_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01', 'signature': 'XXX'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertFalse(bar.login_date, "login_date should not be copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
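# Illustrative note (added for clarity): the helpers above simply build the
# (code, id, values) triplets that the ORM expects for one2many/many2many
# writes, e.g. CREATE({'name': 'foo'}) == (0, False, {'name': 'foo'}),
# UPDATE(7, {'name': 'bar'}) == (1, 7, {'name': 'bar'}) and
# LINK_TO(7) == (4, 7, False).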
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
    def test_LINK_TO_command(self):
        " reads the records from the database; records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
calvingit21/h2o-2
|
py/testdir_single_jvm/notest_exec2_empty_result.py
|
9
|
1382
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_import as h2i, h2o_cmd
initList = [
('r', 'r=i.hex'),
]
exprList = [
"a=c(1,2,3); a = r[r[,1]>2,]",
"a=c(1,2,3); a = r[r[,1]>10,]",
]
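# Note (added for clarity): 'r' is bound to the parsed iris2.csv frame below;
# the second expression filters on a threshold that presumably no iris row
# satisfies, so it is expected to produce an empty result frame.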
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_NOPASS_exec2_empty_result(self):
bucket = 'smalldata'
csvPathname = 'iris/iris2.csv'
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey)
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=10)
start = time.time()
for execExpr in exprList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=10)
rSummary = h2o_cmd.runSummary(key="a")
h2o_cmd.infoFromSummary(rSummary)
h2o.check_sandbox_for_errors()
print "exec end on ", "operators" , 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
ltilve/ChromiumGStreamerBackend
|
chrome/tools/build/win/syzygy/instrument.py
|
28
|
5680
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script to help building Syzygy-instrumented Chrome binaries."""
import glob
import logging
import optparse
import os
import shutil
import subprocess
import sys
# The default directory containing the Syzygy toolchain.
_DEFAULT_SYZYGY_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../../../..',
'third_party/syzygy/binaries/exe/'))
# Basenames of various tools.
_INSTRUMENT_EXE = 'instrument.exe'
_GENFILTER_EXE = 'genfilter.exe'
_LOGGER = logging.getLogger()
def _Shell(*cmd, **kw):
"""Shells out to "cmd". Returns a tuple of cmd's stdout, stderr."""
_LOGGER.info('Running command "%s".', cmd)
prog = subprocess.Popen(cmd, **kw)
stdout, stderr = prog.communicate()
if prog.returncode != 0:
raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
return stdout, stderr
def _CompileFilter(syzygy_dir, executable, symbol, filter_file,
output_filter_file):
"""Compiles the provided filter writing the compiled filter file to
output_filter_file.
"""
cmd = [os.path.abspath(os.path.join(syzygy_dir, _GENFILTER_EXE)),
'--action=compile',
'--input-image=%s' % executable,
'--input-pdb=%s' % symbol,
'--output-file=%s' % output_filter_file,
'--overwrite',
os.path.abspath(filter_file)]
_Shell(*cmd)
if not os.path.exists(output_filter_file):
raise RuntimeError('Compiled filter file missing: %s' % output_filter_file)
return
def _InstrumentBinary(syzygy_dir, mode, executable, symbol, dst_dir,
                      filter_file, allocation_filter_file):
  """Instruments the provided executable and symbol files, and writes the
  resultant instrumented executable and symbol files to dst_dir.
"""
cmd = [os.path.abspath(os.path.join(syzygy_dir, _INSTRUMENT_EXE)),
'--overwrite',
'--mode=%s' % mode,
'--debug-friendly',
'--input-image=%s' % executable,
'--input-pdb=%s' % symbol,
'--output-image=%s' % os.path.abspath(
os.path.join(dst_dir, os.path.basename(executable))),
'--output-pdb=%s' % os.path.abspath(
os.path.join(dst_dir, os.path.basename(symbol)))]
if mode == "asan":
cmd.append('--no-augment-pdb')
    # Disable some of the new SyzyASAN features. We're seeing an increase in
# crash rates and are wondering if they are to blame.
cmd.append(
'--asan-rtl-options="--enable_feature_randomization '
'--prevent_duplicate_corruption_crashes"')
# If any filters were specified then pass them on to the instrumenter.
if filter_file:
cmd.append('--filter=%s' % os.path.abspath(filter_file))
if allocation_filter_file:
cmd.append('--allocation-filter-config-file=%s' %
os.path.abspath(allocation_filter_file))
return _Shell(*cmd)
def main(options):
# Make sure the destination directory exists.
if not os.path.isdir(options.destination_dir):
_LOGGER.info('Creating destination directory "%s".',
options.destination_dir)
os.makedirs(options.destination_dir)
# Compile the filter if one was provided.
if options.filter:
_CompileFilter(options.syzygy_dir,
options.input_executable,
options.input_symbol,
options.filter,
options.output_filter_file)
  # Instrument the binary and write it to the destination directory.
_InstrumentBinary(options.syzygy_dir,
options.mode,
options.input_executable,
options.input_symbol,
options.destination_dir,
options.output_filter_file,
options.allocation_filter_file)
def _ParseOptions():
option_parser = optparse.OptionParser()
option_parser.add_option('--input_executable',
help='The path to the input executable.')
option_parser.add_option('--input_symbol',
help='The path to the input symbol file.')
option_parser.add_option('--mode',
help='Specifies which instrumentation mode is to be used.')
option_parser.add_option('--syzygy-dir', default=_DEFAULT_SYZYGY_DIR,
      help='Path to the Syzygy toolchain directory, defaults to "%default".')
option_parser.add_option('-d', '--destination_dir',
help='Destination directory for instrumented files.')
option_parser.add_option('--filter',
help='An optional filter. This will be compiled and passed to the '
'instrumentation executable.')
option_parser.add_option('--output-filter-file',
help='The path where the compiled filter will be written. This is '
'required if --filter is specified.')
option_parser.add_option('--allocation-filter-file',
help='The path to the SyzyASAN allocation filter to use.')
options, args = option_parser.parse_args()
if not options.mode:
option_parser.error('You must provide an instrumentation mode.')
if not options.input_executable:
option_parser.error('You must provide an input executable.')
if not options.input_symbol:
option_parser.error('You must provide an input symbol file.')
if not options.destination_dir:
option_parser.error('You must provide a destination directory.')
if options.filter and not options.output_filter_file:
option_parser.error('You must provide a filter output file.')
return options
if '__main__' == __name__:
logging.basicConfig(level=logging.INFO)
sys.exit(main(_ParseOptions()))
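# Illustrative invocation sketch (paths and mode below are assumptions, not
# part of the original script):
#   python instrument.py --mode=asan --input_executable=chrome.dll \
#       --input_symbol=chrome.dll.pdb --destination_dir=out\Release\syzygy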
|
bsd-3-clause
|