repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 375) | copies (stringclasses, 991 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
firebitsbr/dionaea | modules/python/scripts/http.py | 8 | 15597 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2009 Paul Baecher & Markus Koetter & Mark Schloesser
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact [email protected]
#*
#*******************************************************************************/
from dionaea.core import connection, g_dionaea, incident, ihandler
import struct
import logging
import os
import sys
import datetime
import io
import cgi
import urllib.parse
import re
import tempfile
logger = logging.getLogger('http')
logger.setLevel(logging.DEBUG)
class httpreq:
def __init__(self, header):
hlines = header.split(b'\n')
req = hlines[0]
reqparts = req.split(b" ")
self.type = reqparts[0]
self.path = urllib.parse.unquote(reqparts[1].decode('utf-8'))
self.version = reqparts[2]
r = self.version.find(b"\r")
# find() returns -1 when there is no "\r"; only trim when one was found
if r != -1:
self.version = self.version[:r]
self.headers = {}
for hline in hlines[1:]:
if hline[len(hline)-1] == 13: # \r
hline = hline[:len(hline)-1]
hset = hline.split(b":", 1)
self.headers[hset[0].lower()] = hset[1].strip()
def print(self):
logger.debug(self.type + b" " + self.path.encode('utf-8') + b" " + self.version)
for i in self.headers:
logger.debug(i + b":" + self.headers[i])
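# Editorial sketch (not part of the original module): how the httpreq parser above handles a
# typical request head; the sample bytes below are illustrative assumptions.
def _demo_httpreq_parse():
    raw = b"GET /cgi-bin/test.cgi?x=1 HTTP/1.0\r\nHost: example.org\r\nUser-Agent: demo"
    req = httpreq(raw)
    # request line fields
    assert req.type == b"GET"
    assert req.path == "/cgi-bin/test.cgi?x=1"
    assert req.version == b"HTTP/1.0"
    # header names are lower-cased bytes keys, values are stripped
    assert req.headers[b"host"] == b"example.org"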
class httpd(connection):
def __init__(self, proto='tcp'):
logger.debug("http test")
connection.__init__(self,proto)
self.state = 'HEADER'
self.rwchunksize = 64*1024
self._out.speed.limit = 16*1024
self.env = None
self.boundary = None
self.fp_tmp = None
self.cur_length = 0
max_request_size = 32768
try:
if 'max-request-size' in g_dionaea.config()['modules']['python']['http']:
# try to convert value to int
max_request_size = int(g_dionaea.config()['modules']['python']['http']['max-request-size'])
else:
logger.info("Value for 'max-request-size' not found, using default value.")
except:
logger.warning("Error while converting 'max-request-size' to an integer value. Using default value.")
self.max_request_size = max_request_size * 1024
def handle_origin(self, parent):
self.root = parent.root
self.rwchunksize = parent.rwchunksize
def handle_established(self):
self.timeouts.idle = 10
self.processors()
def chroot(self, path):
self.root = path
def handle_io_in(self, data):
if self.state == 'HEADER':
# End Of Head
eoh = data.find(b'\r\n\r\n')
# Start Of Content
soc = eoh + 4
if eoh == -1:
eoh = data.find(b'\n\n')
soc = eoh + 2
if eoh == -1:
return 0
header = data[0:eoh]
data = data[soc:]
self.header = httpreq(header)
self.header.print()
if self.header.type == b'GET':
self.handle_GET()
return len(data)
elif self.header.type == b'HEAD':
self.handle_HEAD()
return len(data)
elif self.header.type == b'POST':
if b'content-type' not in self.header.headers and b'content-length' not in self.header.headers:
self.handle_POST()
return len(data)
try:
# at least this information is needed for cgi.FieldStorage() to parse the content
self.env = {
'REQUEST_METHOD':'POST',
'CONTENT_LENGTH': self.header.headers[b'content-length'].decode("utf-8"),
'CONTENT_TYPE': self.header.headers[b'content-type'].decode("utf-8")
}
except:
# ignore decode() errors
self.handle_POST()
return len(data)
m = re.compile(r"multipart/form-data;\s*boundary=(?P<boundary>.*)", re.IGNORECASE).match(self.env['CONTENT_TYPE'])
if not m:
self.handle_POST()
return len(data)
self.state = 'POST'
# More on boundaries see: http://www.apps.ietf.org/rfc/rfc2046.html#sec-5.1.1
self.boundary = bytes("--" + m.group("boundary") + "--\r\n", 'utf-8')
# dump post content to file
self.fp_tmp = tempfile.NamedTemporaryFile(delete=False, prefix='http-', suffix=g_dionaea.config()['downloads']['tmp-suffix'], dir=g_dionaea.config()['downloads']['dir'])
pos = data.find(self.boundary)
# ending boundary not found
if pos < 0:
self.cur_length = soc
return soc
self.fp_tmp.write(data[:pos])
self.handle_POST()
return soc + pos
elif self.header.type == b'OPTIONS':
self.handle_OPTIONS()
return len(data)
# ToDo
#elif self.header.type == b'PUT':
# self.handle_PUT()
# method not found
self.handle_unknown()
return len(data)
elif self.state == 'POST':
pos = data.find(self.boundary)
length = len(data)
if pos < 0:
# boundary not found
l = length - len(self.boundary)
if l < 0:
l = 0
self.cur_length = self.cur_length + l
if self.cur_length > self.max_request_size:
# Close connection if request is too large.
# RFC2616: "The server MAY close the connection to prevent the client from continuing the request."
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.14
x = self.send_error(413)
if x:
self.copyfile(x)
return length
self.fp_tmp.write(data[:l])
return l
# boundary found
self.fp_tmp.write(data[:pos+len(self.boundary)])
self.handle_POST()
return pos + len(self.boundary)
elif self.state == 'PUT':
print("putting to me")
elif self.state == 'SENDFILE':
print("sending file")
return 0
return len(data)
def handle_GET(self):
"""Handle the GET method. Send the header and the file."""
x = self.send_head()
if x :
self.copyfile(x)
def handle_HEAD(self):
"""Handle the HEAD method. Send only the header but not the file."""
x = self.send_head()
if x :
x.close()
self.close()
def handle_OPTIONS(self):
"""
Handle the OPTIONS method. Returns the HTTP methods that the server supports.
"""
self.send_response(200)
self.send_header("Allow", "OPTIONS, GET, HEAD, POST")
self.send_header("Content-Length", "0")
self.send_header("Connection", "close")
self.end_headers()
self.close()
def handle_POST(self):
"""
Handle the POST method. Send the head and the file. But ignore the POST params.
Use the bistreams for a better analysis.
"""
if self.fp_tmp != None:
self.fp_tmp.seek(0)
form = cgi.FieldStorage(fp = self.fp_tmp, environ = self.env)
for field_name in form.keys():
# dump only files
if form[field_name].filename == None:
continue
fp_post = form[field_name].file
data = fp_post.read(4096)
# don't handle empty files
if len(data) == 0:
continue
fp_tmp = tempfile.NamedTemporaryFile(delete=False, prefix='http-', suffix=g_dionaea.config()['downloads']['tmp-suffix'], dir=g_dionaea.config()['downloads']['dir'])
while data != b'':
fp_tmp.write(data)
data = fp_post.read(4096)
icd = incident("dionaea.download.complete")
icd.path = fp_tmp.name
# We need the url for logging
icd.url = ""
fp_tmp.close()
icd.report()
fp_tmp.unlink(fp_tmp.name)
self.fp_tmp.unlink(self.fp_tmp.name)
x = self.send_head()
if x :
self.copyfile(x)
def handle_PUT(self):
pass
def handle_unknown(self):
x = self.send_error(501)
if x:
self.copyfile(x)
def copyfile(self, f):
self.file = f
self.state = 'SENDFILE'
self.handle_io_out()
def send_head(self):
rpath = os.path.normpath(self.header.path)
fpath = os.path.join(self.root, rpath[1:])
apath = os.path.abspath(fpath)
aroot = os.path.abspath(self.root)
logger.debug("root %s aroot %s rpath %s fpath %s apath %s" % (self.root, aroot, rpath, fpath, apath))
if not apath.startswith(aroot):
self.send_response(404, "File not found")
self.end_headers()
self.close()
return None
if os.path.exists(apath):
if os.path.isdir(apath):
if self.header.path.endswith('/'):
testpath = os.path.join(apath, "index.html")
if os.path.isfile(testpath):
apath = testpath
if os.path.isdir(apath):
if not self.header.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.header.path + "/")
self.send_header("Connection", "close")
self.end_headers()
self.close()
return None
return self.list_directory(apath)
elif os.path.isfile(apath):
f = io.open(apath, 'rb')
self.send_response(200)
self.send_header("Connection", "close")
self.send_header("Content-Length", str(os.stat(apath).st_size))
self.end_headers()
return f
else:
return self.send_error(404)
else:
return self.send_error(404)
return None
def handle_io_out(self):
logger.debug("handle_io_out")
if self.state == 'SENDFILE':
w = self.file.read(self.rwchunksize)
if len(w) > 0:
self.send(w)
# send() may call handle_io_out again;
# to avoid a double close warning we check state
if len(w) < self.rwchunksize and self.state != None:
self.state = None
self.close()
self.file.close()
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
list.append("..")
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
displaypath = cgi.escape(self.header.path)
r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
r.append("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
r.append("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
r.append("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
r.append('<li><a href="%s">%s</a>\n' % (urllib.parse.quote(linkname), cgi.escape(displayname)))
r.append("</ul>\n<hr>\n</body>\n</html>\n")
enc = sys.getfilesystemencoding()
encoded = ''.join(r).encode(enc)
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.send_header("Connection", "close")
self.end_headers()
f = io.BytesIO()
f.write(encoded)
f.seek(0)
return f
def send_response(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
self.send("%s %d %s\r\n" % ("HTTP/1.0", code, message))
def send_error(self, code, message = None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
enc = sys.getfilesystemencoding()
r = []
r.append('<?xml version="1.0" encoding="%s"?>\n' % (enc))
r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\n')
r.append(' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
r.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n')
r.append(' <head>\n')
r.append(' <title>%d - %s</title>\n' % (code, message))
r.append(' </head>\n')
r.append(' <body>\n')
r.append(' <h1>%d - %s</h1>\n' % (code, message))
r.append(' </body>\n')
r.append('</html>\n')
encoded = ''.join(r).encode(enc)
self.send_response(code, message)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.send_header("Connection", "close")
self.end_headers()
f = io.BytesIO()
f.write(encoded)
f.seek(0)
return f
def send_header(self, key, value):
self.send("%s: %s\r\n" % (key, value))
def end_headers(self):
self.send("\r\n")
def handle_disconnect(self):
return False
def handle_timeout_idle(self):
return False
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
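# Editorial sketch (not part of the original module): handle_POST() above hands multipart
# bodies to cgi.FieldStorage() with only REQUEST_METHOD, CONTENT_LENGTH and CONTENT_TYPE set;
# the boundary, field name and file name below are illustrative assumptions.
def _demo_multipart_parse():
    body = (b"--XYZ\r\n"
            b'Content-Disposition: form-data; name="upload"; filename="a.bin"\r\n'
            b"Content-Type: application/octet-stream\r\n\r\n"
            b"payload-bytes\r\n"
            b"--XYZ--\r\n")
    env = {
        "REQUEST_METHOD": "POST",
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": "multipart/form-data; boundary=XYZ",
    }
    form = cgi.FieldStorage(fp=io.BytesIO(body), environ=env)
    assert form["upload"].filename == "a.bin"
    assert form["upload"].value == b"payload-bytes"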
| gpl-2.0 |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/test/test_shelve.py | 57 | 5834 | import unittest
import shelve
import glob
from test import support
from collections import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
return s.decode("latin-1")
class byteskeydict(MutableMapping):
"Mapping that supports bytes keys"
def __init__(self):
self.d = {}
def __getitem__(self, key):
return self.d[L1(key)]
def __setitem__(self, key, value):
self.d[L1(key)] = value
def __delitem__(self, key):
del self.d[L1(key)]
def __len__(self):
return len(self.d)
def iterkeys(self):
for k in self.d.keys():
yield k.encode("latin-1")
__iter__ = iterkeys
def keys(self):
return list(self.iterkeys())
def copy(self):
return byteskeydict(self.d)
class TestCase(unittest.TestCase):
fn = "shelftemp.db"
def tearDown(self):
for f in glob.glob(self.fn+"*"):
support.unlink(f)
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
s = shelve.open(self.fn, protocol=0)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_binary_file_shelf(self):
s = shelve.open(self.fn, protocol=1)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_proto2_file_shelf(self):
s = shelve.open(self.fn, protocol=2)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_in_memory_shelf(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
self.assertNotEqual(d1.items(), d2.items())
def test_mutable_entry(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_keyencoding(self):
d = {}
key = 'Pöp'
# the default keyencoding is utf-8
shelve.Shelf(d)[key] = [1]
self.assertIn(key.encode('utf-8'), d)
# but a different one can be given
shelve.Shelf(d, keyencoding='latin1')[key] = [1]
self.assertIn(key.encode('latin1'), d)
# with all consequences
s = shelve.Shelf(d, keyencoding='ascii')
self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
key = 'key'
encodedkey = key.encode('utf-8')
s = shelve.Shelf(d, writeback=True)
s[key] = [1]
p1 = d[encodedkey] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d[encodedkey]
self.assertNotEqual(p1, p2) # Write creates new object in store
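# Editorial sketch (not part of the original test file): the writeback behaviour exercised by
# test_mutable_entry above, shown outside the unittest harness.
def _demo_writeback():
    s = shelve.Shelf(byteskeydict(), protocol=2, writeback=False)
    s["k"] = [1, 2]
    s["k"].append(3)            # mutates a temporary copy; nothing is written back
    assert s["k"] == [1, 2]
    s.close()
    s = shelve.Shelf(byteskeydict(), protocol=2, writeback=True)
    s["k"] = [1, 2]
    s["k"].append(3)            # the cached entry is flushed on sync()/close()
    assert s["k"] == [1, 2, 3]
    s.close()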
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf(byteskeydict(), **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
for module in dbm_iterator():
support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
youdonghai/intellij-community | python/helpers/py3only/docutils/_compat.py | 44 | 1538 | # $Id: _compat.py 7486 2012-07-11 12:25:14Z milde $
# Author: Georg Brandl <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Python 2/3 compatibility definitions.
This module currently provides the following helper symbols:
* bytes (name of byte string type; str in 2.x, bytes in 3.x)
* b (function converting a string literal to an ASCII byte string;
can be also used to convert a Unicode string into a byte string)
* u_prefix (unicode repr prefix: 'u' in 2.x, '' in 3.x)
(Required in docutils/test/test_publisher.py)
* BytesIO (a StringIO class that works with bytestrings)
"""
import sys
if sys.version_info < (3,0):
b = bytes = str
u_prefix = 'u'
else:
import builtins
bytes = builtins.bytes
u_prefix = ''
def b(s):
if isinstance(s, str):
return s.encode('latin1')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
# using this hack since 2to3 "fixes" the relative import
# when using ``from io import BytesIO``
BytesIO = __import__('io').BytesIO
if sys.version_info < (2,5):
import builtins
def __import__(name, globals={}, locals={}, fromlist=[], level=-1):
"""Compatibility definition for Python 2.4.
Silently ignore the `level` argument missing in Python < 2.5.
"""
# we need the level arg because the default changed in Python 3.3
return builtins.__import__(name, globals, locals, fromlist)
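# Editorial sketch (not part of the original module): expected behaviour of the helpers
# documented in the module docstring, shown under Python 3 semantics.
def _demo_compat_helpers():
    assert b("abc") == b"abc"      # str is encoded to latin-1 bytes
    assert b(b"abc") == b"abc"     # bytes pass through unchanged
    buf = BytesIO()
    buf.write(b("data"))
    assert buf.getvalue() == b"data"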
| apache-2.0 |
dacjames/scrapy | scrapy/telnet.py | 141 | 2946 | """
Scrapy Telnet Console extension
See documentation in docs/topics/telnetconsole.rst
"""
import pprint
import logging
from twisted.internet import protocol
try:
from twisted.conch import manhole, telnet
from twisted.conch.insults import insults
TWISTED_CONCH_AVAILABLE = True
except ImportError:
TWISTED_CONCH_AVAILABLE = False
from scrapy.exceptions import NotConfigured
from scrapy import signals
from scrapy.utils.trackref import print_live_refs
from scrapy.utils.engine import print_engine_status
from scrapy.utils.reactor import listen_tcp
try:
import guppy
hpy = guppy.hpy()
except ImportError:
hpy = None
logger = logging.getLogger(__name__)
# signal to update telnet variables
# args: telnet_vars
update_telnet_vars = object()
class TelnetConsole(protocol.ServerFactory):
def __init__(self, crawler):
if not crawler.settings.getbool('TELNETCONSOLE_ENABLED'):
raise NotConfigured
if not TWISTED_CONCH_AVAILABLE:
raise NotConfigured
self.crawler = crawler
self.noisy = False
self.portrange = [int(x) for x in crawler.settings.getlist('TELNETCONSOLE_PORT')]
self.host = crawler.settings['TELNETCONSOLE_HOST']
self.crawler.signals.connect(self.start_listening, signals.engine_started)
self.crawler.signals.connect(self.stop_listening, signals.engine_stopped)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def start_listening(self):
self.port = listen_tcp(self.portrange, self.host, self)
h = self.port.getHost()
logger.debug("Telnet console listening on %(host)s:%(port)d",
{'host': h.host, 'port': h.port},
extra={'crawler': self.crawler})
def stop_listening(self):
self.port.stopListening()
def protocol(self):
telnet_vars = self._get_telnet_vars()
return telnet.TelnetTransport(telnet.TelnetBootstrapProtocol,
insults.ServerProtocol, manhole.Manhole, telnet_vars)
def _get_telnet_vars(self):
# Note: if you add entries here also update topics/telnetconsole.rst
telnet_vars = {
'engine': self.crawler.engine,
'spider': self.crawler.engine.spider,
'slot': self.crawler.engine.slot,
'crawler': self.crawler,
'extensions': self.crawler.extensions,
'stats': self.crawler.stats,
'settings': self.crawler.settings,
'est': lambda: print_engine_status(self.crawler.engine),
'p': pprint.pprint,
'prefs': print_live_refs,
'hpy': hpy,
'help': "This is Scrapy telnet console. For more info see: " \
"http://doc.scrapy.org/en/latest/topics/telnetconsole.html",
}
self.crawler.signals.send_catch_log(update_telnet_vars, telnet_vars=telnet_vars)
return telnet_vars
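# Editorial sketch (not part of the original module): how another component could use the
# update_telnet_vars signal declared above to expose extra objects in the console; the
# MyExtension name is an illustrative assumption.
class MyExtension(object):

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.add_telnet_vars, signal=update_telnet_vars)
        return ext

    def add_telnet_vars(self, telnet_vars):
        # called from TelnetConsole._get_telnet_vars() via send_catch_log()
        telnet_vars['myext'] = self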
| bsd-3-clause |
erikhaverkamp/mppmon | multiplexer.py | 1 | 3438 | from Phidget22.Devices.DigitalOutput import *
from Phidget22.Net import *
import time
class Multiplexer:
def __init__(self):
print('init mp')
try:
self.ch0 = DigitalOutput()
self.ch1 = DigitalOutput()
self.ch2 = DigitalOutput()
self.ch3 = DigitalOutput()
self.ch4 = DigitalOutput()
self.ch5 = DigitalOutput()
self.ch6 = DigitalOutput()
self.ch7 = DigitalOutput()
except RuntimeError as e:
print("Runtime Exception %s" % e.details)
print("Press Enter to Exit...\n")
raise
def ErrorEvent(self, e, eCode, description):
print("Error %i : %s" % (eCode, description))
try:
self.ch0.setOnErrorHandler(ErrorEvent)
self.ch1.setOnErrorHandler(ErrorEvent)
self.ch2.setOnErrorHandler(ErrorEvent)
self.ch3.setOnErrorHandler(ErrorEvent)
self.ch4.setOnErrorHandler(ErrorEvent)
self.ch5.setOnErrorHandler(ErrorEvent)
self.ch6.setOnErrorHandler(ErrorEvent)
self.ch7.setOnErrorHandler(ErrorEvent)
self.ch0.setChannel(0)
self.ch1.setChannel(1)
self.ch2.setChannel(2)
self.ch3.setChannel(3)
self.ch4.setChannel(4)
self.ch5.setChannel(5)
self.ch6.setChannel(6)
self.ch7.setChannel(7)
print("Waiting for the Phidget DigitalOutput Objects to be attached...")
self.ch0.openWaitForAttachment(5000)
self.ch1.openWaitForAttachment(5000)
self.ch2.openWaitForAttachment(5000)
self.ch3.openWaitForAttachment(5000)
self.ch4.openWaitForAttachment(5000)
self.ch5.openWaitForAttachment(5000)
self.ch6.openWaitForAttachment(5000)
self.ch7.openWaitForAttachment(5000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
raise
def SelectChannel(self, channel):
self.ch0.setState(0)
self.ch1.setState(0)
self.ch2.setState(0)
self.ch3.setState(0)
self.ch4.setState(0)
self.ch5.setState(0)
self.ch6.setState(0)
self.ch7.setState(0)
if channel < 0:
return
if channel == 0:
self.ch0.setState(1)
if channel == 1:
self.ch1.setState(1)
if channel == 2:
self.ch2.setState(1)
if channel == 3:
self.ch3.setState(1)
if channel == 4:
self.ch4.setState(1)
if channel == 5:
self.ch5.setState(1)
if channel == 6:
self.ch6.setState(1)
if channel == 7:
self.ch7.setState(1)
time.sleep(10)
def ChannelsOff(self):
self.SelectChannel(-1)
def close(self):
try:
self.ch0.close()
self.ch1.close()
self.ch2.close()
self.ch3.close()
self.ch4.close()
self.ch5.close()
self.ch6.close()
self.ch7.close()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
raise
print("Closed DigitalOutput device")
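# Editorial sketch (not part of the original module): intended call sequence for the class
# above, assuming a Phidget digital-output board is actually attached.
if __name__ == "__main__":
    mux = Multiplexer()
    try:
        mux.SelectChannel(3)    # switch output 3 on, all others off (sleeps ~10 s)
        mux.ChannelsOff()       # switch every output off
    finally:
        mux.close()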
| mit |
lianwutech/plugin_serialrtu_modbus | plugin.py | 2 | 3860 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Serial data acquisition plugin for a modbus network.
1. device_id is composed as ip_port_slaveid
2. The device type is 0 and the protocol type is modbus
3. devices_info_dict must persist device information: it is loaded at startup and written whenever it changes
4. device_cmd content: a JSON string
"""
import time
from setting import *
from libs.daemon import Daemon
from libs.plugin import *
from libs.mqttclient import MQTTClient
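# Editorial sketch (not part of the original file): the device_id scheme described in the
# module docstring; the helper name and values are illustrative assumptions.
def make_device_id(ip, port, slave_id):
    return "%s_%s_%s" % (ip, port, slave_id)    # e.g. "192.168.1.10_502_3"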
# Global variables
devices_file_name = "devices.txt"
config_file_name = "plugin.cfg"
# Logger object
logger = logging.getLogger('plugin')
# Configuration info
config_info = load_config(config_file_name)
# Plugin daemon class
class PluginDaemon(Daemon):
def _run(self):
# Change to the working directory
os.chdir(cur_file_dir())
if "channel_type" not in config_info \
or "protocol_type" not in config_info \
or "mqtt" not in config_info \
or "channel" not in config_info \
or "protocol" not in config_info:
logger.fatal("Configuration file is missing required items; startup failed.")
return
channel_type = config_info["channel_type"]
protocol_type = config_info["protocol_type"]
network_name = config_info["network_name"]
# Get the channel class object
channel_class = load_channel(channel_type)
# Get the protocol class object
protocol_class = load_protocol(protocol_type)
# Parameter check
if channel_class.check_config(config_info["channel"]) \
and protocol_class.check_config(config_info["protocol"]) \
and MQTTClient.check_config(config_info["mqtt"]):
logger.debug("Parameter check passed.")
else:
logger.fatal("Invalid channel, protocol or mqtt configuration items, please check.")
return
# Note the startup order here: create the mqtt object first, then the channel object;
# the mqtt object can only connect to the server after its channel attribute is set
# 1. Initialize the mqttclient object
mqtt_client = MQTTClient(config_info["mqtt"], network_name)
result = mqtt_client.connect()
if not result:
logger.fatal("mqtt connect fail.")
return
# 2. Initialize the protocol object
protocol = protocol_class(config_info["protocol"])
# 3. Initialize the channel object
channel = channel_class(config_info["channel"], devices_file_name, protocol, mqtt_client, network_name)
# 4. Set the channel object
mqtt_client.set_channel(channel)
while True:
if not channel.isAlive():
logger.info("channel process stopped, restarting.")
channel.start()
if not mqtt_client.isAlive():
logger.info("mqtt process stopped, restarting.")
mqtt_client.start()
logger.debug("Periodic processing finished")
time.sleep(2)
# Main function
def main(argv):
pid_file_path = "/tmp/%s.pid" % plugin_name
stdout_file_path = "/tmp/%s.stdout" % plugin_name
stderr_file_path = "/tmp/%s.stderr" % plugin_name
daemon = PluginDaemon(pid_file_path, stdout=stdout_file_path, stderr=stderr_file_path)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
logger.info("Unknown command")
sys.exit(2)
sys.exit(0)
elif len(sys.argv) == 1:
daemon.run()
else:
logger.info("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
def entry_point():
"""Zero-argument entry point for use with setuptools/distribute."""
raise SystemExit(main(sys.argv))
if __name__ == '__main__':
entry_point() | apache-2.0 |
bigdocker/cloud-init | cloudinit/distros/rhel_util.py | 6 | 2930 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from cloudinit.distros.parsers.resolv_conf import ResolvConf
from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit import log as logging
from cloudinit import util
LOG = logging.getLogger(__name__)
# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
def update_sysconfig_file(fn, adjustments, allow_empty=False):
if not adjustments:
return
(exists, contents) = read_sysconfig_file(fn)
updated_am = 0
for (k, v) in adjustments.items():
if v is None:
continue
v = str(v)
if len(v) == 0 and not allow_empty:
continue
contents[k] = v
updated_am += 1
if updated_am:
lines = [
str(contents),
]
if not exists:
lines.insert(0, util.make_header())
util.write_file(fn, "\n".join(lines) + "\n", 0644)
# Helper function to read a RHEL/SUSE /etc/sysconfig/* file
def read_sysconfig_file(fn):
exists = False
try:
contents = util.load_file(fn).splitlines()
exists = True
except IOError:
contents = []
return (exists, SysConf(contents))
# Helper function to update RHEL/SUSE /etc/resolv.conf
def update_resolve_conf_file(fn, dns_servers, search_servers):
try:
r_conf = ResolvConf(util.load_file(fn))
r_conf.parse()
except IOError:
util.logexc(LOG, "Failed at parsing %s reverting to an empty "
"instance", fn)
r_conf = ResolvConf('')
r_conf.parse()
if dns_servers:
for s in dns_servers:
try:
r_conf.add_nameserver(s)
except ValueError:
util.logexc(LOG, "Failed at adding nameserver %s", s)
if search_servers:
for s in search_servers:
try:
r_conf.add_search_domain(s)
except ValueError:
util.logexc(LOG, "Failed at adding search domain %s", s)
util.write_file(fn, str(r_conf), 0644)
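# Editorial sketch (not part of the original module): typical calls into the helpers above;
# the paths and values are illustrative assumptions.
def _demo_rhel_util(sysconfig_path, resolv_conf_path):
    update_sysconfig_file(sysconfig_path, {'NETWORKING': 'yes', 'HOSTNAME': 'node1'})
    update_resolve_conf_file(resolv_conf_path, ['10.0.0.2', '10.0.0.3'], ['example.internal'])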
| gpl-3.0 |
jwlawson/tensorflow | tensorflow/python/eager/execute.py | 16 | 8289 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Execute a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
num_outputs: The number of outputs of the operation to fetch.
(Explicitly provided instead of being inferred for performance
reasons).
inputs: A list of inputs to the operation. Each entry should be a Tensor, or
a value which can be passed to the Tensor constructor to create one.
attrs: A tuple with alternating string attr names and attr values for this
operation.
ctx: The value of context.context().
name: Customized name for the operation.
Returns:
List of output Tensor objects. The list is empty if there are no outputs
Raises:
An exception on error.
"""
device_name = ctx.device_name
# pylint: disable=protected-access
try:
tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
op_name, inputs, attrs,
num_outputs)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
return tensors
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Monkey-patch to execute to enable execution callbacks."""
tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
for callback in ctx.post_execution_callbacks:
callback(op_name, name, attrs, inputs, tensors)
return tensors
execute = quick_execute
def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_results,
unused_name):
"""Import backprop if you want gradients recorded."""
pass
def make_float(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def make_int(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def make_str(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def make_bool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def make_type(v, arg_name):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(arg_name, repr(v)))
i = v.as_datatype_enum
return i
def make_shape(v, arg_name):
"""Convert v into a list."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# None if the rank is unknown, otherwise a list of ints (or Nones in the
# position where the dimension is unknown).
try:
shape = tensor_shape.as_shape(v)
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
e))
if shape.ndims is None:
return None
else:
return shape.as_list()
def make_tensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
elif isinstance(v, six.string_types):
pb = tensor_pb2.TensorProto()
text_format.Merge(v, pb)
return pb
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'." %
(repr(v), arg_name))
def args_to_matching_eager(l, ctx, default_dtype=None):
"""Convert sequence `l` to eager same-type Tensors."""
EagerTensor = ops.EagerTensor # pylint: disable=invalid-name
for x in l:
if not isinstance(x, EagerTensor):
break
else: # note: intentional for-else
return l[0]._datatype_enum(), l # pylint: disable=protected-access
# TODO(josh11b): Could we do a better job if we also passed in the
# allowed dtypes when that was known?
# Is some input already a Tensor with a dtype?
dtype = None
for t in l:
if isinstance(t, EagerTensor):
dtype = t.dtype
break
internal_convert_to_tensor = ops.internal_convert_to_tensor
if dtype is None:
# Infer a dtype based on the first value, and use that dtype for the
# remaining values.
ret = []
for t in l:
ret.append(internal_convert_to_tensor(
t, dtype, preferred_dtype=default_dtype, ctx=ctx))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]
return dtype.as_datatype_enum, ret
def convert_to_mixed_eager_tensors(values, ctx):
v = [
t if isinstance(t, ops.EagerTensor) else ops.EagerTensor(
t, context=ctx._handle, device=ctx.device_name) # pylint: disable=protected-access
for t in values
]
types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
return types, v
def args_to_mixed_eager_tensors(lists, ctx):
"""Converts a list of same-length lists of values to eager tensors."""
assert len(lists) > 1
# Generate an error if len(lists[i]) is not the same for all i.
lists_ret = []
for l in lists[1:]:
if len(l) != len(lists[0]):
raise ValueError(
"Expected list arguments to be the same length: %d != %d (%r vs. %r)."
% (len(lists[0]), len(l), lists[0], l))
lists_ret.append([])
# Convert the first element of each list first, then the second element, etc.
types = []
for i in range(len(lists[0])):
dtype = None
# If any list has a Tensor, use that dtype
for l in lists:
if isinstance(l[i], ops.EagerTensor):
dtype = l[i].dtype
break
if dtype is None:
# Convert the first one and use its dtype.
lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx))
dtype = lists_ret[0][i].dtype
for j in range(1, len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
else:
# Convert everything to the found dtype.
for j in range(len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
types.append(dtype.as_datatype_enum)
return types, lists_ret
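# Editorial sketch (not part of the original module): expected behaviour of a few of the
# attr-conversion helpers above, shown without executing an op.
def _demo_attr_helpers():
  assert make_int(3.7, "n") == 3
  assert make_str("x", "s") == b"x"                    # text is converted to bytes
  assert make_shape([None, 28, 28], "shape") == [None, 28, 28]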
| apache-2.0 |
kirbyfan64/pygments-unofficial | pygments/util.py | 10 | 8806 | # -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''(?smx)
(<\?.*?\?>)?\s*
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*\s+
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")
[^>]*>
''')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>(?uism)')
class ClassNotFound(ValueError):
"""
If one of the get_*_by_* functions didn't find a matching class.
"""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""
Return a static text analysation function that
returns float values.
"""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
"""
Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""
Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.match(text)
if m is None:
return False
doctype = m.group(2)
return re.compile(regex).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""
Check if the file looks like it has a html doctype.
"""
return doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""
Check if a doctype exists or if we have some tags.
"""
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
# Python narrow build compatibility
def _surrogatepair(c):
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def unirange(a, b):
"""
Returns a regular expression string to match the given non-BMP range.
"""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
# cover any lead surrogates strictly between ah and bh (bh >= ah here, so the
# original "ah - bh > 1" test could never be true)
if bh - ah > 1:
buf.append(u'[%s-%s][%s-%s]' %
(unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
# Python 2/3 compatibility
if sys.version_info < (3, 0):
unichr = unichr
xrange = xrange
string_types = (str, unicode)
text_type = unicode
u_prefix = 'u'
iteritems = dict.iteritems
itervalues = dict.itervalues
import StringIO, cStringIO
# unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
BytesIO = cStringIO.StringIO
else:
unichr = chr
xrange = range
string_types = (str,)
text_type = str
u_prefix = ''
iteritems = dict.items
itervalues = dict.values
from io import StringIO, BytesIO
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
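# Editorial sketch (not part of the original module): a few of the option and shebang helpers
# above in use.
def _demo_util_helpers():
    assert get_bool_opt({'strip': 'yes'}, 'strip') is True
    assert get_int_opt({'tabsize': '8'}, 'tabsize') == 8
    assert shebang_matches('#!/usr/bin/env python\nprint(1)\n', r'python(2\.\d)?')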
| bsd-2-clause |
felixfontein/ansible | test/lib/ansible_test/_internal/delegation.py | 13 | 21085 | """Delegate test execution to another environment."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import sys
import tempfile
from . import types as t
from .io import (
make_dirs,
read_text_file,
)
from .executor import (
create_shell_command,
run_pypi_proxy,
get_python_interpreter,
get_python_version,
)
from .config import (
TestConfig,
EnvironmentConfig,
IntegrationConfig,
WindowsIntegrationConfig,
NetworkIntegrationConfig,
ShellConfig,
SanityConfig,
UnitsConfig,
)
from .core_ci import (
AnsibleCoreCI,
SshKey,
)
from .manage_ci import (
ManagePosixCI,
ManageWindowsCI,
get_ssh_key_setup,
)
from .util import (
ApplicationError,
common_environment,
display,
ANSIBLE_BIN_PATH,
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
tempdir,
SUPPORTED_PYTHON_VERSIONS,
)
from .util_common import (
run_command,
ResultType,
create_interpreter_wrapper,
get_docker_completion,
get_remote_completion,
)
from .docker_util import (
docker_exec,
docker_get,
docker_inspect,
docker_pull,
docker_put,
docker_rm,
docker_run,
docker_network_disconnect,
get_docker_command,
get_docker_hostname,
)
from .containers import (
SshConnectionDetail,
support_container_context,
)
from .data import (
data_context,
)
from .payload import (
create_payload,
)
from .venv import (
create_virtual_environment,
)
from .ci import (
get_ci_provider,
)
def check_delegation_args(args):
"""
:type args: CommonConfig
"""
if not isinstance(args, EnvironmentConfig):
return
if args.docker:
get_python_version(args, get_docker_completion(), args.docker_raw)
elif args.remote:
get_python_version(args, get_remote_completion(), args.remote)
def delegate(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:rtype: bool
"""
if isinstance(args, TestConfig):
args.metadata.ci_provider = get_ci_provider().code
make_dirs(ResultType.TMP.path)
with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
args.metadata.to_file(args.metadata_path)
try:
return delegate_command(args, exclude, require)
finally:
args.metadata_path = None
else:
return delegate_command(args, exclude, require)
def delegate_command(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:rtype: bool
"""
if args.venv:
delegate_venv(args, exclude, require)
return True
if args.docker:
delegate_docker(args, exclude, require)
return True
if args.remote:
delegate_remote(args, exclude, require)
return True
return False
def delegate_venv(args, # type: EnvironmentConfig
exclude, # type: t.List[str]
require, # type: t.List[str]
): # type: (...) -> None
"""Delegate ansible-test execution to a virtual environment using venv or virtualenv."""
if args.python:
versions = (args.python_version,)
else:
versions = SUPPORTED_PYTHON_VERSIONS
if args.venv_system_site_packages:
suffix = '-ssp'
else:
suffix = ''
venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s%s' % (version, suffix))) for version in versions)
venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path, args.venv_system_site_packages))
if not venvs:
raise ApplicationError('No usable virtual environment support found.')
options = {
'--venv': 0,
'--venv-system-site-packages': 0,
}
with tempdir() as inject_path:
for version, path in venvs.items():
create_interpreter_wrapper(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version))
python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version)
cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require)
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'venv']
env = common_environment()
with tempdir() as library_path:
# expose ansible and ansible_test to the virtual environment (only required when running from an install)
os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
env.update(
PATH=inject_path + os.path.pathsep + env['PATH'],
PYTHONPATH=library_path,
)
with support_container_context(args, None) as containers:
if containers:
cmd.extend(['--containers', json.dumps(containers.to_dict())])
run_command(args, cmd, env=env)
def delegate_docker(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
"""
get_docker_command(required=True) # fail early if docker is not available
test_image = args.docker
privileged = args.docker_privileged
docker_pull(args, test_image)
test_id = None
success = False
options = {
'--docker': 1,
'--docker-privileged': 0,
'--docker-util': 1,
}
python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw)
pwd = '/root'
ansible_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(pwd, data_context().content.collection.directory)
else:
content_root = ansible_root
remote_results_root = os.path.join(content_root, data_context().content.results_path)
cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
image_label = args.docker_raw
image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
cmd += ['--coverage-label', 'docker-%s' % image_label]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
cmd_options = []
if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy):
cmd_options.append('-it')
pypi_proxy_id, pypi_proxy_endpoint = run_pypi_proxy(args)
if pypi_proxy_endpoint:
cmd += ['--pypi-endpoint', pypi_proxy_endpoint]
with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
try:
create_payload(args, local_source_fd.name)
test_options = [
'--detach',
'--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
'--privileged=%s' % str(privileged).lower(),
]
if args.docker_memory:
test_options.extend([
'--memory=%d' % args.docker_memory,
'--memory-swap=%d' % args.docker_memory,
])
docker_socket = '/var/run/docker.sock'
if args.docker_seccomp != 'default':
test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp]
if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)]
test_id = docker_run(args, test_image, options=test_options)
setup_sh = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'))
ssh_keys_sh = get_ssh_key_setup(SshKey(args))
setup_sh += ssh_keys_sh
shell = setup_sh.splitlines()[0][2:]
docker_exec(args, test_id, [shell], data=setup_sh)
# write temporary files to /root since /tmp isn't ready immediately on container start
docker_put(args, test_id, local_source_fd.name, '/root/test.tgz')
docker_exec(args, test_id, ['tar', 'oxzf', '/root/test.tgz', '-C', '/root'])
# docker images are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
# run unit tests unprivileged to prevent stray writes to the source tree
# also disconnect from the network once requirements have been installed
if isinstance(args, UnitsConfig):
writable_dirs = [
os.path.join(content_root, ResultType.JUNIT.relative_path),
os.path.join(content_root, ResultType.COVERAGE.relative_path),
]
docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs)
docker_exec(args, test_id, ['chmod', '777'] + writable_dirs)
docker_exec(args, test_id, ['chmod', '755', '/root'])
docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)])
docker_exec(args, test_id, ['useradd', 'pytest', '--create-home'])
docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options)
container = docker_inspect(args, test_id)
networks = container.get_network_names()
if networks is not None:
for network in networks:
docker_network_disconnect(args, test_id, network)
else:
display.warning('Network disconnection is not supported (this is normal under podman). '
'Tests will not be isolated from the network. Network-related tests may misbehave.')
cmd += ['--requirements-mode', 'skip']
cmd_options += ['--user', 'pytest']
try:
with support_container_context(args, None) as containers:
if containers:
cmd.extend(['--containers', json.dumps(containers.to_dict())])
docker_exec(args, test_id, cmd, options=cmd_options)
# docker_exec will throw SubprocessError if not successful
# If we make it here, all the prep work earlier and the docker_exec line above were all successful.
success = True
finally:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_test_root = os.path.dirname(remote_results_root)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_file = os.path.join('/root', remote_results_name + '.tgz')
try:
make_dirs(local_test_root) # make sure directory exists for collections which have no tests
with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root,
remote_results_name])
docker_get(args, test_id, remote_temp_file, local_result_fd.name)
run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
except Exception as ex: # pylint: disable=broad-except
if success:
raise # download errors are fatal, but only if tests succeeded
# handle download error here to avoid masking test failures
display.warning('Failed to download results while handling an exception: %s' % ex)
finally:
if pypi_proxy_id:
docker_rm(args, pypi_proxy_id)
if test_id:
if args.docker_terminate == 'always' or (args.docker_terminate == 'success' and success):
docker_rm(args, test_id)
def delegate_remote(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
"""
remote = args.parsed_remote
core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch)
success = False
ssh_options = []
content_root = None
try:
core_ci.start()
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
python_interpreter = None
if remote.platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
manage = ManageWindowsCI(core_ci)
manage.setup(python_version)
cmd = ['powershell.exe']
elif isinstance(args, ShellConfig) and args.raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
cmd = create_shell_command(['sh'])
else:
manage = ManagePosixCI(core_ci)
pwd = manage.setup(python_version)
options = {
'--remote': 1,
}
python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
ansible_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(pwd, data_context().content.collection.directory)
else:
content_root = ansible_root
cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)]
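            # the delegated instance is ephemeral, so destructive tests can safely be
            # enabled even if the original invocation did not allow them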
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
try:
ssh_con = core_ci.connection
ssh = SshConnectionDetail(core_ci.name, ssh_con.hostname, ssh_con.port, ssh_con.username, core_ci.ssh_key.key, python_interpreter)
with support_container_context(args, ssh) as containers:
if containers:
cmd.extend(['--containers', json.dumps(containers.to_dict())])
manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
if remote.platform != 'windows':
download = True
if isinstance(args, ShellConfig):
if args.raw:
download = False
if download and content_root:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_results_root = os.path.join(content_root, data_context().content.results_path)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_path = os.path.join('/tmp', remote_results_name)
            # AIX cp and GNU cp accept different options; no single invocation was found
            # that works on both, so the copy flags are chosen per platform.
cp_opts = '-hr' if remote.platform == 'aix' else '-a'
try:
command = 'rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root)
manage.ssh(command, capture=True) # pylint: disable=unexpected-keyword-arg
manage.download(remote_temp_path, local_test_root)
except Exception as ex: # pylint: disable=broad-except
if success:
raise # download errors are fatal, but only if tests succeeded
# handle download error here to avoid masking test failures
display.warning('Failed to download results while handling an exception: %s' % ex)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require):
"""
:type args: EnvironmentConfig
:type python_interpreter: str | None
:type ansible_bin_path: str
:type content_root: str
:type options: dict[str, int]
:type exclude: list[str]
:type require: list[str]
:rtype: list[str]
"""
options['--color'] = 1
cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
if python_interpreter:
cmd = [python_interpreter] + cmd
# Force the encoding used during delegation.
# This is only needed because ansible-test relies on Python's file system encoding.
# Environments that do not have the locale configured are thus unable to work with unicode file paths.
# Examples include FreeBSD and some Linux containers.
env_vars = dict(
LC_ALL='en_US.UTF-8',
ANSIBLE_TEST_CONTENT_ROOT=content_root,
)
env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
cmd = ['/usr/bin/env'] + env_args + cmd
cmd += list(filter_options(args, sys.argv[1:], options, exclude, require))
cmd += ['--color', 'yes' if args.color else 'no']
if args.requirements:
cmd += ['--requirements']
if isinstance(args, ShellConfig):
cmd = create_shell_command(cmd)
elif isinstance(args, SanityConfig):
base_branch = args.base_branch or get_ci_provider().get_base_branch()
if base_branch:
cmd += ['--base-branch', base_branch]
return cmd
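# Illustrative note (an assumed sketch, not output produced verbatim by this module):
# the delegated command built above generally takes the shape
#   /usr/bin/env ANSIBLE_TEST_CONTENT_ROOT=<content_root> LC_ALL=en_US.UTF-8 \
#       [<python_interpreter>] <ansible_bin_path>/ansible-test <filtered argv> --color {yes|no}
# where <filtered argv> is produced by filter_options() below.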
def filter_options(args, argv, options, exclude, require):
"""
:type args: EnvironmentConfig
:type argv: list[str]
:type options: dict[str, int]
:type exclude: list[str]
:type require: list[str]
:rtype: collections.Iterable[str]
"""
options = options.copy()
options['--requirements'] = 0
options['--truncate'] = 1
options['--redact'] = 0
options['--no-redact'] = 0
if isinstance(args, TestConfig):
options.update({
'--changed': 0,
'--tracked': 0,
'--untracked': 0,
'--ignore-committed': 0,
'--ignore-staged': 0,
'--ignore-unstaged': 0,
'--changed-from': 1,
'--changed-path': 1,
'--metadata': 1,
'--exclude': 1,
'--require': 1,
})
elif isinstance(args, SanityConfig):
options.update({
'--base-branch': 1,
})
if isinstance(args, IntegrationConfig):
options.update({
'--no-temp-unicode': 0,
'--no-pip-check': 0,
})
if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)):
options.update({
'--inventory': 1,
})
remaining = 0
for arg in argv:
if not arg.startswith('-') and remaining:
remaining -= 1
continue
remaining = 0
parts = arg.split('=', 1)
key = parts[0]
if key in options:
remaining = options[key] - len(parts) + 1
continue
yield arg
for arg in args.delegate_args:
yield arg
for target in exclude:
yield '--exclude'
yield target
for target in require:
yield '--require'
yield target
if isinstance(args, TestConfig):
if args.metadata_path:
yield '--metadata'
yield args.metadata_path
yield '--truncate'
yield '%d' % args.truncate
if args.redact:
yield '--redact'
else:
yield '--no-redact'
if isinstance(args, IntegrationConfig):
if args.no_temp_unicode:
yield '--no-temp-unicode'
if not args.pip_check:
yield '--no-pip-check'
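# Hedged worked example (hypothetical values, not part of the original module):
# with options == {'--docker': 1} and argv == ['units', '--docker', 'default', 'ping'],
# filter_options() re-emits 'units' and 'ping', drops '--docker default' (that
# delegation has already happened), and then appends the forced flags for
# TestConfig runs such as '--metadata', '--truncate' and '--redact'/'--no-redact',
# plus any --exclude/--require targets passed in.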
| gpl-3.0 |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/ipykernel/tests/test_message_spec.py | 8 | 14797 | """Test suite for our zeromq-based message specification."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import sys
from distutils.version import LooseVersion as V
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
import nose.tools as nt
from nose.plugins.skip import SkipTest
from traitlets import (
HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum
)
from ipython_genutils.py3compat import string_types, iteritems
from .utils import TIMEOUT, start_global_kernel, flush_channels, execute
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
KC = None
def setup():
global KC
KC = start_global_kernel()
#-----------------------------------------------------------------------------
# Message Spec References
#-----------------------------------------------------------------------------
class Reference(HasTraits):
"""
Base class for message spec specification testing.
This class is the core of the message specification test. The
idea is that child classes implement trait attributes for each
message keys, so that message keys can be tested against these
traits using :meth:`check` method.
"""
def check(self, d):
"""validate a dict against our traits"""
for key in self.trait_names():
nt.assert_in(key, d)
# FIXME: always allow None, probably not a good idea
if d[key] is None:
continue
try:
setattr(self, key, d[key])
except TraitError as e:
assert False, str(e)
class Version(Unicode):
def __init__(self, *args, **kwargs):
self.min = kwargs.pop('min', None)
self.max = kwargs.pop('max', None)
kwargs['default_value'] = self.min
super(Version, self).__init__(*args, **kwargs)
def validate(self, obj, value):
if self.min and V(value) < V(self.min):
raise TraitError("bad version: %s < %s" % (value, self.min))
if self.max and (V(value) > V(self.max)):
raise TraitError("bad version: %s > %s" % (value, self.max))
class RMessage(Reference):
msg_id = Unicode()
msg_type = Unicode()
header = Dict()
parent_header = Dict()
content = Dict()
def check(self, d):
super(RMessage, self).check(d)
RHeader().check(self.header)
if self.parent_header:
RHeader().check(self.parent_header)
class RHeader(Reference):
msg_id = Unicode()
msg_type = Unicode()
session = Unicode()
username = Unicode()
version = Version(min='5.0')
mime_pat = re.compile(r'^[\w\-\+\.]+/[\w\-\+\.]+$')
class MimeBundle(Reference):
metadata = Dict()
data = Dict()
def _data_changed(self, name, old, new):
for k,v in iteritems(new):
assert mime_pat.match(k)
nt.assert_is_instance(v, string_types)
# shell replies
class Reply(Reference):
status = Enum((u'ok', u'error'), default_value=u'ok')
class ExecuteReply(Reply):
execution_count = Integer()
def check(self, d):
Reference.check(self, d)
if d['status'] == 'ok':
ExecuteReplyOkay().check(d)
elif d['status'] == 'error':
ExecuteReplyError().check(d)
class ExecuteReplyOkay(Reply):
status = Enum(('ok',))
user_expressions = Dict()
class ExecuteReplyError(Reply):
ename = Unicode()
evalue = Unicode()
traceback = List(Unicode())
class InspectReply(Reply, MimeBundle):
found = Bool()
class ArgSpec(Reference):
args = List(Unicode())
varargs = Unicode()
varkw = Unicode()
defaults = List()
class Status(Reference):
execution_state = Enum((u'busy', u'idle', u'starting'), default_value=u'busy')
class CompleteReply(Reply):
matches = List(Unicode())
cursor_start = Integer()
cursor_end = Integer()
status = Unicode()
class LanguageInfo(Reference):
name = Unicode('python')
version = Unicode(sys.version.split()[0])
class KernelInfoReply(Reply):
protocol_version = Version(min='5.0')
implementation = Unicode('ipython')
implementation_version = Version(min='2.1')
language_info = Dict()
banner = Unicode()
def check(self, d):
Reference.check(self, d)
LanguageInfo().check(d['language_info'])
class ConnectReply(Reference):
shell_port = Integer()
control_port = Integer()
stdin_port = Integer()
iopub_port = Integer()
hb_port = Integer()
class CommInfoReply(Reply):
comms = Dict()
class IsCompleteReply(Reference):
status = Enum((u'complete', u'incomplete', u'invalid', u'unknown'), default_value=u'complete')
def check(self, d):
Reference.check(self, d)
if d['status'] == 'incomplete':
IsCompleteReplyIncomplete().check(d)
class IsCompleteReplyIncomplete(Reference):
indent = Unicode()
# IOPub messages
class ExecuteInput(Reference):
code = Unicode()
execution_count = Integer()
class Error(ExecuteReplyError):
"""Errors are the same as ExecuteReply, but without status"""
status = None # no status field
class Stream(Reference):
name = Enum((u'stdout', u'stderr'), default_value=u'stdout')
text = Unicode()
class DisplayData(MimeBundle):
pass
class ExecuteResult(MimeBundle):
execution_count = Integer()
class HistoryReply(Reply):
history = List(List())
references = {
'execute_reply' : ExecuteReply(),
'inspect_reply' : InspectReply(),
'status' : Status(),
'complete_reply' : CompleteReply(),
'kernel_info_reply': KernelInfoReply(),
'connect_reply': ConnectReply(),
'comm_info_reply': CommInfoReply(),
'is_complete_reply': IsCompleteReply(),
'execute_input' : ExecuteInput(),
'execute_result' : ExecuteResult(),
'history_reply' : HistoryReply(),
'error' : Error(),
'stream' : Stream(),
'display_data' : DisplayData(),
'header' : RHeader(),
}
"""
Specifications of `content` part of the reply messages.
"""
def validate_message(msg, msg_type=None, parent=None):
"""validate a message
    The message and its header are checked against the Reference specs above.
If msg_type and/or parent are given, the msg_type and/or parent msg_id
are compared with the given values.
"""
RMessage().check(msg)
if msg_type:
nt.assert_equal(msg['msg_type'], msg_type)
if parent:
nt.assert_equal(msg['parent_header']['msg_id'], parent)
content = msg['content']
ref = references[msg['msg_type']]
ref.check(content)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Shell channel
def test_execute():
flush_channels()
msg_id = KC.execute(code='x=1')
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'execute_reply', msg_id)
def test_execute_silent():
flush_channels()
msg_id, reply = execute(code='x=1', silent=True)
# flush status=idle
status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
validate_message(status, 'status', msg_id)
nt.assert_equal(status['content']['execution_state'], 'idle')
nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
count = reply['execution_count']
msg_id, reply = execute(code='x=2', silent=True)
# flush status=idle
status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
validate_message(status, 'status', msg_id)
nt.assert_equal(status['content']['execution_state'], 'idle')
nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
count_2 = reply['execution_count']
nt.assert_equal(count_2, count)
def test_execute_error():
flush_channels()
msg_id, reply = execute(code='1/0')
nt.assert_equal(reply['status'], 'error')
nt.assert_equal(reply['ename'], 'ZeroDivisionError')
error = KC.iopub_channel.get_msg(timeout=TIMEOUT)
validate_message(error, 'error', msg_id)
def test_execute_inc():
"""execute request should increment execution_count"""
flush_channels()
msg_id, reply = execute(code='x=1')
count = reply['execution_count']
flush_channels()
msg_id, reply = execute(code='x=2')
count_2 = reply['execution_count']
nt.assert_equal(count_2, count+1)
def test_execute_stop_on_error():
"""execute request should not abort execution queue with stop_on_error False"""
flush_channels()
fail = '\n'.join([
# sleep to ensure subsequent message is waiting in the queue to be aborted
'import time',
'time.sleep(0.5)',
'raise ValueError',
])
KC.execute(code=fail)
msg_id = KC.execute(code='print("Hello")')
KC.get_shell_msg(timeout=TIMEOUT)
reply = KC.get_shell_msg(timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'aborted')
flush_channels()
KC.execute(code=fail, stop_on_error=False)
msg_id = KC.execute(code='print("Hello")')
KC.get_shell_msg(timeout=TIMEOUT)
reply = KC.get_shell_msg(timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
def test_user_expressions():
flush_channels()
msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
user_expressions = reply['user_expressions']
nt.assert_equal(user_expressions, {u'foo': {
u'status': u'ok',
u'data': {u'text/plain': u'2'},
u'metadata': {},
}})
def test_user_expressions_fail():
flush_channels()
msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
user_expressions = reply['user_expressions']
foo = user_expressions['foo']
nt.assert_equal(foo['status'], 'error')
nt.assert_equal(foo['ename'], 'NameError')
def test_oinfo():
flush_channels()
msg_id = KC.inspect('a')
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
def test_oinfo_found():
flush_channels()
msg_id, reply = execute(code='a=5')
msg_id = KC.inspect('a')
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
content = reply['content']
assert content['found']
text = content['data']['text/plain']
nt.assert_in('Type:', text)
nt.assert_in('Docstring:', text)
def test_oinfo_detail():
flush_channels()
msg_id, reply = execute(code='ip=get_ipython()')
msg_id = KC.inspect('ip.object_inspect', cursor_pos=10, detail_level=1)
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
content = reply['content']
assert content['found']
text = content['data']['text/plain']
nt.assert_in('Signature:', text)
nt.assert_in('Source:', text)
def test_oinfo_not_found():
flush_channels()
msg_id = KC.inspect('dne')
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
content = reply['content']
nt.assert_false(content['found'])
def test_complete():
flush_channels()
msg_id, reply = execute(code="alpha = albert = 5")
msg_id = KC.complete('al', 2)
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'complete_reply', msg_id)
matches = reply['content']['matches']
for name in ('alpha', 'albert'):
nt.assert_in(name, matches)
def test_kernel_info_request():
flush_channels()
msg_id = KC.kernel_info()
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'kernel_info_reply', msg_id)
def test_connect_request():
flush_channels()
msg = KC.session.msg('connect_request')
KC.shell_channel.send(msg)
    msg_id = msg['header']['msg_id']
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'connect_reply', msg_id)
def test_comm_info_request():
flush_channels()
if not hasattr(KC, 'comm_info'):
raise SkipTest()
msg_id = KC.comm_info()
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'comm_info_reply', msg_id)
def test_single_payload():
flush_channels()
msg_id, reply = execute(code="for i in range(3):\n"+
" x=range?\n")
payload = reply['payload']
next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
nt.assert_equal(len(next_input_pls), 1)
def test_is_complete():
flush_channels()
msg_id = KC.is_complete("a = 1")
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'is_complete_reply', msg_id)
def test_history_range():
flush_channels()
msg_id_exec = KC.execute(code='x=1', store_history = True)
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
msg_id = KC.history(hist_access_type = 'range', raw = True, output = True, start = 1, stop = 2, session = 0)
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'history_reply', msg_id)
content = reply['content']
nt.assert_equal(len(content['history']), 1)
def test_history_tail():
flush_channels()
msg_id_exec = KC.execute(code='x=1', store_history = True)
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
msg_id = KC.history(hist_access_type = 'tail', raw = True, output = True, n = 1, session = 0)
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'history_reply', msg_id)
content = reply['content']
nt.assert_equal(len(content['history']), 1)
def test_history_search():
flush_channels()
msg_id_exec = KC.execute(code='x=1', store_history = True)
reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
msg_id = KC.history(hist_access_type = 'search', raw = True, output = True, n = 1, pattern = '*', session = 0)
reply = KC.get_shell_msg(timeout=TIMEOUT)
validate_message(reply, 'history_reply', msg_id)
content = reply['content']
nt.assert_equal(len(content['history']), 1)
# IOPub channel
def test_stream():
flush_channels()
msg_id, reply = execute("print('hi')")
stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
validate_message(stdout, 'stream', msg_id)
content = stdout['content']
nt.assert_equal(content['text'], u'hi\n')
def test_display_data():
flush_channels()
msg_id, reply = execute("from IPython.core.display import display; display(1)")
display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
validate_message(display, 'display_data', parent=msg_id)
data = display['content']['data']
nt.assert_equal(data['text/plain'], u'1')
| mit |
luceatnobis/youtube-dl | youtube_dl/extractor/c56.py | 91 | 2083 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import js_to_json
class C56IE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|player)\.)?56\.com/(?:.+?/)?(?:v_|(?:play_album.+-))(?P<textid>.+?)\.(?:html|swf)'
IE_NAME = '56.com'
_TESTS = [{
'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
'md5': 'e59995ac63d0457783ea05f93f12a866',
'info_dict': {
'id': '93440716',
'ext': 'flv',
'title': '网事知多少 第32期:车怒',
'duration': 283.813,
},
}, {
'url': 'http://www.56.com/u47/v_MTM5NjQ5ODc2.html',
'md5': '',
'info_dict': {
'id': '82247482',
'title': '爱的诅咒之杜鹃花开',
},
'playlist_count': 7,
'add_ie': ['Sohu'],
}]
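    # Illustration (assumed example, not part of the original extractor): the
    # 'textid' group captures the id segment of a 56.com URL, e.g.
    #   re.match(C56IE._VALID_URL, 'http://www.56.com/u39/v_OTM0NDA3MTY.html').group('textid')
    #   returns 'OTM0NDA3MTY', which _real_extract() below uses for the info lookup.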
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
text_id = mobj.group('textid')
webpage = self._download_webpage(url, text_id)
sohu_video_info_str = self._search_regex(
r'var\s+sohuVideoInfo\s*=\s*({[^}]+});', webpage, 'Sohu video info', default=None)
if sohu_video_info_str:
sohu_video_info = self._parse_json(
sohu_video_info_str, text_id, transform_source=js_to_json)
return self.url_result(sohu_video_info['url'], 'Sohu')
page = self._download_json(
'http://vxml.56.com/json/%s/' % text_id, text_id, 'Downloading video info')
info = page['info']
formats = [
{
'format_id': f['type'],
'filesize': int(f['filesize']),
'url': f['url']
} for f in info['rfiles']
]
self._sort_formats(formats)
return {
'id': info['vid'],
'title': info['Subject'],
'duration': int(info['duration']) / 1000.0,
'formats': formats,
'thumbnail': info.get('bimg') or info.get('img'),
}
| unlicense |
elliotthill/django-oscar | oscar/apps/payment/abstract_models.py | 2 | 11000 | from decimal import Decimal
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.utils import slugify
from oscar.templatetags.currency_filters import currency
from . import bankcards
class AbstractTransaction(models.Model):
"""
A transaction for a particular payment source.
These are similar to the payment events within the order app but model a
slightly different aspect of payment. Crucially, payment sources and
transactions have nothing to do with the lines of the order while payment
events do.
For example:
* A 'pre-auth' with a bankcard gateway
* A 'settle' with a credit provider (see django-oscar-accounts)
"""
source = models.ForeignKey(
'payment.Source', related_name='transactions',
verbose_name=_("Source"))
# We define some sample types but don't constrain txn_type to be one of
# these as there will be domain-specific ones that we can't anticipate
# here.
AUTHORISE, DEBIT, REFUND = 'Authorise', 'Debit', 'Refund'
txn_type = models.CharField(_("Type"), max_length=128, blank=True)
amount = models.DecimalField(_("Amount"), decimal_places=2, max_digits=12)
reference = models.CharField(_("Reference"), max_length=128, blank=True)
status = models.CharField(_("Status"), max_length=128, blank=True)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
def __unicode__(self):
return _(u"%(type)s of %(amount).2f") % {
'type': self.txn_type,
'amount': self.amount}
class Meta:
abstract = True
verbose_name = _("Transaction")
verbose_name_plural = _("Transactions")
ordering = ['-date_created']
class AbstractSource(models.Model):
"""
A source of payment for an order.
This is normally a credit card which has been pre-authed for the order
amount, but some applications will allow orders to be paid for using
multiple sources such as cheque, credit accounts, gift cards. Each payment
source will have its own entry.
This source object tracks how much money has been authorised, debited and
refunded, which is useful when payment takes place in multiple stages.
"""
order = models.ForeignKey(
'order.Order', related_name='sources', verbose_name=_("Order"))
source_type = models.ForeignKey(
'payment.SourceType', verbose_name=_("Source Type"),
related_name="sources")
currency = models.CharField(
_("Currency"), max_length=12, default=settings.OSCAR_DEFAULT_CURRENCY)
# Track the various amounts associated with this source
amount_allocated = models.DecimalField(
_("Amount Allocated"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
amount_debited = models.DecimalField(
_("Amount Debited"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
amount_refunded = models.DecimalField(
_("Amount Refunded"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
# Reference number for this payment source. This is often used to look up
# a transaction model for a particular payment partner.
reference = models.CharField(_("Reference"), max_length=128, blank=True)
# A customer-friendly label for the source, eg XXXX-XXXX-XXXX-1234
label = models.CharField(_("Label"), max_length=128, blank=True)
# A dictionary of submission data that is stored as part of the
# checkout process, where we need to pass an instance of this class around
submission_data = None
# We keep a list of deferred transactions that are only actually saved when
# the source is saved for the first time
deferred_txns = None
class Meta:
abstract = True
verbose_name = _("Source")
verbose_name_plural = _("Sources")
def __unicode__(self):
description = _("Allocation of %(amount)s from type %(type)s") % {
'amount': currency(self.amount_allocated, self.currency),
'type': self.source_type}
if self.reference:
description += _(" (reference: %s)") % self.reference
return description
def save(self, *args, **kwargs):
super(AbstractSource, self).save(*args, **kwargs)
if self.deferred_txns:
for txn in self.deferred_txns:
self._create_transaction(*txn)
def create_deferred_transaction(self, txn_type, amount, reference=None,
status=None):
"""
Register the data for a transaction that can't be created yet due to FK
        constraints. This happens at checkout, where we create a payment source
        and a transaction but can't save them until the order model exists.
"""
if self.deferred_txns is None:
self.deferred_txns = []
self.deferred_txns.append((txn_type, amount, reference, status))
def _create_transaction(self, txn_type, amount, reference='',
status=''):
self.transactions.create(
txn_type=txn_type, amount=amount,
reference=reference, status=status)
# =======
# Actions
# =======
def allocate(self, amount, reference='', status=''):
"""
Convenience method for ring-fencing money against this source
"""
self.amount_allocated += amount
self.save()
self._create_transaction(
AbstractTransaction.AUTHORISE, amount, reference, status)
allocate.alters_data = True
def debit(self, amount=None, reference='', status=''):
"""
Convenience method for recording debits against this source
"""
if amount is None:
amount = self.balance
self.amount_debited += amount
self.save()
self._create_transaction(
AbstractTransaction.DEBIT, amount, reference, status)
debit.alters_data = True
def refund(self, amount, reference='', status=''):
"""
Convenience method for recording refunds against this source
"""
self.amount_refunded += amount
self.save()
self._create_transaction(
AbstractTransaction.REFUND, amount, reference, status)
refund.alters_data = True
# ==========
# Properties
# ==========
@property
def balance(self):
"""
Return the balance of this source
"""
return (self.amount_allocated - self.amount_debited +
self.amount_refunded)
@property
def amount_available_for_refund(self):
"""
Return the amount available to be refunded
"""
return self.amount_debited - self.amount_refunded
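# Hedged stand-alone sketch (not part of the upstream module) of the arithmetic
# behind AbstractSource.balance and amount_available_for_refund above.
def _example_source_arithmetic():
    allocated, debited, refunded = Decimal('100.00'), Decimal('60.00'), Decimal('10.00')
    balance = allocated - debited + refunded  # Decimal('50.00')
    refundable = debited - refunded           # Decimal('50.00')
    return balance, refundable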
class AbstractSourceType(models.Model):
"""
A type of payment source.
This could be an external partner like PayPal or DataCash,
or an internal source such as a managed account.
"""
name = models.CharField(_("Name"), max_length=128)
code = models.SlugField(
_("Code"), max_length=128,
help_text=_("This is used within forms to identify this source type"))
class Meta:
abstract = True
verbose_name = _("Source Type")
verbose_name_plural = _("Source Types")
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.code:
self.code = slugify(self.name)
super(AbstractSourceType, self).save(*args, **kwargs)
class AbstractBankcard(models.Model):
"""
Model representing a user's bankcard. This is used for two purposes:
1. The bankcard form will return an instance of this model that can be
used with payment gateways. In this scenario, the instance will
have additional attributes (start_date, issue_number, ccv) that
payment gateways need but that we don't save.
2. To keep a record of a user's bankcards and allow them to be
re-used. This is normally done using the 'partner reference'.
"""
user = models.ForeignKey(AUTH_USER_MODEL, related_name='bankcards',
verbose_name=_("User"))
card_type = models.CharField(_("Card Type"), max_length=128)
# Often you don't actually need the name on the bankcard
name = models.CharField(_("Name"), max_length=255, blank=True)
# We store an obfuscated version of the card number, just showing the last
# 4 digits.
number = models.CharField(_("Number"), max_length=32)
# We store a date even though only the month is visible. Bankcards are
# valid until the last day of the month.
expiry_date = models.DateField(_("Expiry Date"))
# For payment partners who are storing the full card details for us
partner_reference = models.CharField(
_("Partner Reference"), max_length=255, blank=True)
# Temporary data not persisted to the DB
start_date = None
issue_number = None
ccv = None
def __unicode__(self):
return _(u"%(card_type)s %(number)s (Expires: %(expiry)s)") % {
'card_type': self.card_type,
'number': self.number,
'expiry': self.expiry_month()}
def __init__(self, *args, **kwargs):
# Pop off the temporary data
self.start_date = kwargs.pop('start_date', None)
self.issue_number = kwargs.pop('issue_number', None)
self.ccv = kwargs.pop('ccv', None)
super(AbstractBankcard, self).__init__(*args, **kwargs)
self.card_type = bankcards.bankcard_type(self.number)
if self.card_type is None:
self.card_type = 'Unknown card type'
class Meta:
abstract = True
verbose_name = _("Bankcard")
verbose_name_plural = _("Bankcards")
def save(self, *args, **kwargs):
if not self.number.startswith('X'):
self.prepare_for_save()
super(AbstractBankcard, self).save(*args, **kwargs)
def prepare_for_save(self):
# This is the first time this card instance is being saved. We
# remove all sensitive data
self.number = u"XXXX-XXXX-XXXX-%s" % self.number[-4:]
self.start_date = self.issue_number = self.ccv = None
@property
def card_number(self):
"""
The card number
"""
import warnings
warnings.warn(("The `card_number` property is deprecated in favour of "
"`number` on the Bankcard model"), DeprecationWarning)
return self.number
@property
def cvv(self):
return self.ccv
@property
def obfuscated_number(self):
return u'XXXX-XXXX-XXXX-%s' % self.number[-4:]
def start_month(self, format='%m/%y'):
return self.start_date.strftime(format)
def expiry_month(self, format='%m/%y'):
return self.expiry_date.strftime(format)
| bsd-3-clause |
rhertzog/django | django/db/backends/postgresql/version.py | 632 | 1517 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL #.#.#
# EnterpriseDB #.#
# PostgreSQL #.# beta#
# PostgreSQL #.#beta#
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 90304 for 9.3.4. The last two digits will be 00 in the case of
releases (e.g., 90400 for 'PostgreSQL 9.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
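# Hedged usage sketch (not part of the original module): how _parse_version maps
# a "SELECT version()" banner onto the PQServerVersion() integer format.
def _example_parse_version():
    assert _parse_version('PostgreSQL 9.3.4 on x86_64-unknown-linux-gnu') == 90304
    assert _parse_version('PostgreSQL 9.4beta1') == 90400  # no revision -> trailing 00
    assert _parse_version('EnterpriseDB 9.3') == 90300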
| bsd-3-clause |
Byron/bcore | src/python/bkvstore/types.py | 1 | 3079 | #-*-coding:utf-8-*-
"""
@package bkvstore.types
@brief Implements a few types based on our base classes
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from __future__ import unicode_literals
__all__ = ['YAMLKeyValueStoreModifier', 'ChangeTrackingJSONKeyValueStoreModifier',
'JSONStreamSerializer', 'YAMLStreamSerializer', 'JSONKeyValueStoreModifier']
import yaml
import json
from bdiff import AutoResolveAdditiveMergeDelegate
from butility import OrderedDict
from .persistence import OrderedDictYAMLLoader
from .serialize import (SerializingKeyValueStoreModifier,
ChangeTrackingSerializingKeyValueStoreModifier,
IStreamSerializer)
class YAMLStreamSerializer(IStreamSerializer):
"""Serialize from and to yaml"""
__slots__ = ()
# the extension of files we can read
file_extension = '.yaml'
def deserialize(self, stream):
"""@note can throw yaml.YAMLError, currently we don't use this information specifically"""
return yaml.load(stream, Loader=OrderedDictYAMLLoader) or dict()
def serialize(self, data, stream):
yaml.dump(data, stream)
# end class YAMLStreamSerializer
class YAMLKeyValueStoreModifier(SerializingKeyValueStoreModifier):
"""Implemnetation for yaml-based stores"""
__slots__ = ()
# the extension of files we can read
StreamSerializerType = YAMLStreamSerializer
# end class YAMLKeyValueStoreModifier
class JSONStreamSerializer(IStreamSerializer):
"""Serialize to and from json """
__slots__ = ()
file_extension = '.json'
    def deserialize(self, stream):
        """@note can throw ValueError (invalid JSON); currently we don't use this information specifically
@todo can use object_pairs_hook = OrderedDict to load ordered dicts. But it made a test fail because
suddenly there were different values. Hints at a bug somewhere, but didn't look into it yet"""
return json.load(stream) or dict()
def serialize(self, data, stream):
"""Makes sure it is human readable
@note for now, we convert everything to a string, brutally. The KVStore would have to deal with
converting the string versions back, and it might not work for everything"""
json.dump(data, stream, indent=4, separators=(',', ': '), default=str)
# end class JSONStreamSerializer
class JSONKeyValueStoreModifier(SerializingKeyValueStoreModifier):
    """A modifier with JSon serialization
@note no support for OrderedDicts just yet
"""
__slots__ = ()
StreamSerializerType = JSONStreamSerializer
# end class JSONKeyValueStoreModifier
class ChangeTrackingJSONKeyValueStoreModifier(ChangeTrackingSerializingKeyValueStoreModifier):
"""A modifier with change tracking and JSon serialization
@note no support for OrderedDicts just yet
"""
__slots__ = ()
StreamSerializerType = JSONStreamSerializer
# end class ChangeTrackingJSONKeyValueStoreModifier
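# Hedged usage sketch (not part of the upstream package; assumes Python 3 for the
# text stream): a mapping round-trips through the YAML serializer.
def _example_yaml_roundtrip():
    from io import StringIO
    stream = StringIO()
    YAMLStreamSerializer().serialize({'key': 'value'}, stream)
    stream.seek(0)
    return YAMLStreamSerializer().deserialize(stream)  # mapping containing 'key': 'value'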
| lgpl-3.0 |
frouty/odoogoeen | addons/account/report/__init__.py | 68 | 1601 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_central_journal
import account_general_journal
import account_journal
import account_balance
import account_partner_balance
import account_general_ledger
import account_partner_ledger
#import invoice
import account_print_invoice
#import overdue
import account_print_overdue
import account_aged_partner_balance
#import tax_report
import account_tax_report
import account_invoice_report
import account_report
import account_entries_report
import account_analytic_entries_report
import account_treasury_report
import account_financial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xmission/d-note | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
| agpl-3.0 |
maheshp/novatest | nova/vnc/__init__.py | 25 | 1855 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for VNC Proxying."""
from oslo.config import cfg
vnc_opts = [
cfg.StrOpt('novncproxy_base_url',
default='http://127.0.0.1:6080/vnc_auto.html',
help='location of vnc console proxy, in the form '
'"http://127.0.0.1:6080/vnc_auto.html"'),
cfg.StrOpt('xvpvncproxy_base_url',
default='http://127.0.0.1:6081/console',
help='location of nova xvp vnc console proxy, in the form '
'"http://127.0.0.1:6081/console"'),
cfg.StrOpt('vncserver_listen',
default='127.0.0.1',
help='IP address on which instance vncservers should listen'),
cfg.StrOpt('vncserver_proxyclient_address',
default='127.0.0.1',
help='the address to which proxy clients '
'(like nova-xvpvncproxy) should connect'),
cfg.BoolOpt('vnc_enabled',
default=True,
help='enable vnc related features'),
cfg.StrOpt('vnc_keymap',
default='en-us',
help='keymap for vnc'),
]
CONF = cfg.CONF
CONF.register_opts(vnc_opts)
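# Hedged usage note (not part of the original module): once registered, the options
# above are read from the global CONF object, e.g. CONF.vnc_enabled (True by
# default) or CONF.novncproxy_base_url.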
| apache-2.0 |
hanhlh/hadoop-0.20.2_FatBTree | contrib/hod/testing/main.py | 182 | 2928 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re
myPath = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myPath)
testingDir = os.path.join(rootDirectory, "testing")
sys.path.append(rootDirectory)
from testing.lib import printSeparator, printLine
moduleList = []
allList = []
excludes = [
]
# Build a module list by scanning through all files in testingDir
for file in os.listdir(testingDir):
  if(re.search(r"\.py$", file) and re.search(r"^test", file)):
# All .py files with names starting in 'test'
module = re.sub(r"^test","",file)
    module = re.sub(r"\.py$","",module)
allList.append(module)
if module not in excludes:
moduleList.append(module)
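# Illustration (hypothetical file name): 'testHod.py' yields the base name 'Hod',
# gets imported below as 'testing.testHod', and its entry point 'RunHodTests' is called.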
printLine("All testcases - %s" % allList)
printLine("Excluding the testcases - %s" % excludes)
printLine("Executing the testcases - %s" % moduleList)
testsResult = 0
# Now import each of these modules and start calling the corresponding
#testSuite methods
for moduleBaseName in moduleList:
try:
module = "testing.test" + moduleBaseName
suiteCaller = "Run" + moduleBaseName + "Tests"
printSeparator()
printLine("Running %s" % suiteCaller)
# Import the corresponding test cases module
imported_module = __import__(module , fromlist=[suiteCaller] )
# Call the corresponding suite method now
testRes = getattr(imported_module, suiteCaller)()
testsResult = testsResult + testRes
printLine("Finished %s. TestSuite Result : %s\n" % \
(suiteCaller, testRes))
except ImportError, i:
# Failed to import a test module
printLine(i)
testsResult = testsResult + 1
pass
except AttributeError, n:
# Failed to get suiteCaller from a test module
printLine(n)
testsResult = testsResult + 1
pass
except Exception, e:
# Test module suiteCaller threw some exception
printLine("%s failed. \nReason : %s" % (suiteCaller, e))
printLine("Skipping %s" % suiteCaller)
testsResult = testsResult + 1
pass
if testsResult != 0:
printSeparator()
printLine("Total testcases with failure or error : %s" % testsResult)
sys.exit(testsResult)
| apache-2.0 |
Shrulik/Open-Knesset | docs/api/source/conf.py | 14 | 7865 | # -*- coding: utf-8 -*-
#
# OpenKnesset documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 28 22:11:32 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Open Knesset API and embedding'
copyright = u'2011, 2012 The Public Knowledge Workshop'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0-devel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenKnessetdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenKnesset.tex', u'OpenKnesset Documentation',
u'Hasadna', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openknesset', u'OpenKnesset API and Embedding Documentation',
[u'Hasadna'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenKnesset', u'OpenKnesset API and Embedding Documentation',
u'Hasadna', 'OpenKnesset', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
djbpitt/collatex | collatex-pythonport/collatex/tokenindex.py | 3 | 11153 | from ClusterShell.RangeSet import RangeSet
from collatex.block import Block
from collatex.core_classes import Token
from collatex.linsuffarr import SuffixArray
from collatex.linsuffarr import UNIT_BYTE
class Stack(list):
def push(self, item):
self.append(item)
def peek(self):
return self[-1]
# TokenIndex
class TokenIndex(object):
def __init__(self, witnesses):
self.witnesses = witnesses
# print("\nwitnesses=",witnesses)
self.counter = 0
self.witness_ranges = {}
self.token_array = []
self.witness_to_block_instances = {}
self.suffix_array = []
self.lcp_array = []
self.blocks = []
def prepare(self):
self._prepare_token_array()
# print("> self.token_array=", self.token_array)
# call third party library here
self.suffix_array = self.get_suffix_array()
self.lcp_array = self.get_lcp_array()
self.blocks = self.split_lcp_array_into_intervals()
self.construct_witness_to_block_instances_map()
@classmethod
def create_token_index(cls, collation):
token_index = TokenIndex(collation.witnesses)
token_index.prepare()
return token_index
def _prepare_token_array(self):
# TODO: the lazy init should move to somewhere else
# clear the suffix array and LCP array cache
self.cached_suffix_array = None
token_array_position = 0
for idx, witness in enumerate(self.witnesses):
# print("witness.tokens",witness.tokens())
witness_range = RangeSet()
witness_range.add_range(self.counter, self.counter + len(witness.tokens()))
# the extra one is for the marker token
self.counter += len(witness.tokens()) + 1
self.witness_ranges[witness.sigil] = witness_range
            # note: witness.tokens() is called more than once in this loop (the result is not cached)
sigil = witness.sigil
for token in witness.tokens():
token.token_data['_sigil'] = sigil
token.token_data['_token_array_position'] = token_array_position
token_array_position += 1
self.token_array.extend(witness.tokens())
# # add marker token
self.token_array.append(Token({"n": '$' + str(idx), '_sigil': sigil}))
token_array_position += 1
self.token_array.pop() # remove last marker
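    # Illustrative note for _prepare_token_array above (made-up witnesses): for
    # witness A = "the cat" and witness B = "the dog" the resulting token_array is
    # [the, cat, $0, the, dog] -- a '$<idx>' marker token separates consecutive
    # witnesses, and the trailing marker is popped off again.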
def split_lcp_array_into_intervals(self):
closed_intervals = []
previous_lcp_value = 0
open_intervals = Stack()
for idx in range(0, len(self.lcp_array)):
lcp_value = self.lcp_array[idx]
if lcp_value > previous_lcp_value:
open_intervals.push(Block(self, start=idx - 1, length=lcp_value))
previous_lcp_value = lcp_value
elif lcp_value < previous_lcp_value:
# close open intervals that are larger than current LCP value
while open_intervals and open_intervals.peek().length > lcp_value:
a = open_intervals.pop()
closed_intervals.append(Block(self, start=a.start, end=idx - 1, length=a.length))
                # then: open a new interval starting at the start of the last closed interval
if lcp_value > 0:
start = closed_intervals[len(closed_intervals) - 1].start
open_intervals.push(Block(self, start=start, length=lcp_value))
previous_lcp_value = lcp_value
# add all the open intervals to the result
# print("> open_intervals=", open_intervals)
# print("> closed_intervals=", closed_intervals)
for interval in open_intervals:
if interval.length > 0:
closed_intervals.append(
Block(self, start=interval.start, end=len(self.lcp_array) - 1, length=interval.length))
# print("> closed_intervals=", closed_intervals)
return closed_intervals
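    # A hand-traced sketch of split_lcp_array_into_intervals above (illustrative
    # values, not from a real collation): for lcp_array = [0, 2, 2, 0, 1] a block of
    # length 2 is opened at idx 1, kept open while the LCP value stays at 2, and
    # closed as Block(start=0, end=2, length=2) when the value drops to 0 at idx 3;
    # a new block of length 1 opened at idx 4 is closed at the end of the array as
    # Block(start=3, end=4, length=1).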
def get_range_for_witness(self, witness_sigil):
if witness_sigil not in self.witness_ranges:
raise Exception("Witness " + witness_sigil + " is not added to the collation!")
return self.witness_ranges[witness_sigil]
def get_sa(self):
# NOTE: implemented in a lazy manner, since calculation of the Suffix Array and LCP Array takes time
# print("token_array=",self.token_array)
# for token in self.token_array:
# print(token,":",type(token))
if not self.cached_suffix_array:
# string_array = ''.join([token.token_string for token in self.token_array])
string_array = [token.token_string for token in self.token_array]
# Unit byte is done to skip tokenization in third party library
##print("> string_array =", string_array)
self.cached_suffix_array = SuffixArray(string_array, unit=UNIT_BYTE)
##print("> suffix_array:\n", self.cached_suffix_array)
return self.cached_suffix_array
def get_suffix_array(self):
sa = self.get_sa()
return sa.SA
def get_lcp_array(self):
sa = self.get_sa()
return sa._LCP_values
def start_token_position_for_witness(self, witness):
return self.get_range_for_witness(witness.sigil)[0]
def block_instances_for_witness(self, witness):
return self.witness_to_block_instances.setdefault(witness.sigil, [])
def construct_witness_to_block_instances_map(self):
self.witness_to_block_instances = {}
##print("> self.blocks", self.blocks)
for interval in self.blocks:
for instance in interval.get_all_instances():
##print(">instance = ", instance)
w = instance.get_witness_sigil()
instances = self.witness_to_block_instances.setdefault(w, [])
instances.append(instance)
# # NOTE: LCP intervals can 1) ascend or 2) descend or 3) first ascend and then descend. 4) descend, ascend
# def split_lcp_array_into_intervals(self):
# closed_intervals = []
# previous_lcp_value = 0
# open_intervals = Stack()
# for idx, lcp_value in enumerate(self.lcp_array):
# # print(lcp_value)
# if lcp_value > previous_lcp_value:
# open_intervals.push((idx - 1, lcp_value))
# previous_lcp_value = lcp_value
# if lcp_value < previous_lcp_value:
# # close open intervals that are larger than current lcp_value
# while open_intervals and open_intervals.peek()[1] > lcp_value:
# # print("Peek: "+str(open_intervals.peek()))
# (start, length) = open_intervals.pop()
# # TODO: FIX NUMBER OF SIBLINGS!
# closed_intervals.append(LCPInterval(self, start, idx - 1, length, 0))
# # print("new: "+repr(closed_intervals[-1]))
# # then: open a new interval starting with start filter open intervals.
# if lcp_value > 0:
# start = closed_intervals[-1].start
# open_intervals.push((start, lcp_value))
# previous_lcp_value = lcp_value
# # add all the open intervals to the result
# # print("Closing remaining:")
# for start, length in open_intervals:
# # TODO: FIX NUMBER OF SIBLINGS!
# closed_intervals.append(LCPInterval(self, start, len(self.lcp_array) - 1, length, 0))
# # print("new: "+repr(closed_intervals[-1]))
# return closed_intervals
# factory method for testing purposes only!
@classmethod
def for_test(cls, sa_array, lcp_array):
token_index = TokenIndex(None)
token_index.suffix_array = sa_array
token_index.lcp_array = lcp_array
        return token_index
# parts of the LCP array become potential blocks.
# # minimum block_length: the number of tokens a single occurrence of this block spans
# # block_occurrences: the ranges within the suffix array that this block spans
# class LCPInterval(object):
# def __init__(self, token_index, start, end, length, number_of_siblings):
# self.token_index = token_index
# self.start = start
# self.end = end
# self.length = length
# self.number_of_siblings = number_of_siblings
#
# @property
# def minimum_block_length(self):
# return self.length
#
# @property
# def number_of_occurrences(self):
# return self.end - self.start + 1
#
# def block_occurrences(self):
# block_occurrences = []
# for idx in range(self.start, self.end + 1):
# block_occurrences.append(self.token_index.suffix_array[idx])
# return block_occurrences
#
# def info(self):
# return "looking at: " + str(self)
#
# @property
# def token_start_position(self):
# return min(self.block_occurrences())
#
# def _as_range(self):
# # convert interval into range
# range_set = RangeSet()
# for occurrence in self.block_occurrences():
# range_set.add_range(occurrence, occurrence + self.minimum_block_length)
# return range_set
#
# @property
# def number_of_witnesses(self):
# range = self._as_range()
# number_of_witnesses = 0
# for witness_range in self.token_index.witness_ranges.values():
# if witness_range.intersection(range):
# number_of_witnesses += 1
# return number_of_witnesses
#
# def show_lcp_array(self):
# return self.LCP[self.start:self.end + 1]
#
# def __lt__(self, other):
# same = other.number_of_witnesses == self.number_of_witnesses
# if not same:
# return other.number_of_witnesses < self.number_of_witnesses
#
# same = other.length == self.length
# if not same:
# return other.length < self.length
#
# return self.number_of_occurrences < other.number_of_occurrences
#
# def __str__(self):
# tokens = (token.token_string for token in self.token_index.token_array[
# self.token_index.suffix_array[self.start]:
# self.token_index.suffix_array[self.start] + min(10,
# self.minimum_block_length)])
# part1 = "<" + " ".join(tokens)
# return part1 + "> with " + str(self.number_of_witnesses) + ":" + str(
# self.number_of_occurrences) + " witnesses/occurrences and length: " + str(
# self.minimum_block_length) + " and number of siblings: " + str(self.number_of_siblings)
#
# # start (suffix), length, depth, frequency
# def __repr__(self):
# return "LCPivl: " + str(self.start) + "," + str(self.minimum_block_length) + "," + str(
# self.number_of_witnesses) + "," + str(self.number_of_occurrences)
| gpl-3.0 |
ngonzalvez/sentry | src/sentry/quotas/redis.py | 13 | 3036 | """
sentry.quotas.redis
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import time
from django.conf import settings
from rb import Cluster
from sentry.exceptions import InvalidConfiguration
from sentry.quotas.base import Quota, RateLimited, NotRateLimited
class RedisQuota(Quota):
ttl = 60
def __init__(self, **options):
if not options:
# inherit default options from REDIS_OPTIONS
options = settings.SENTRY_REDIS_OPTIONS
super(RedisQuota, self).__init__(**options)
options.setdefault('hosts', {0: {}})
self.cluster = Cluster(options['hosts'])
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(unicode(e))
def is_rate_limited(self, project):
proj_quota = self.get_project_quota(project)
if project.team:
team_quota = self.get_team_quota(project.team)
else:
team_quota = 0
system_quota = self.get_system_quota()
if not (proj_quota or system_quota or team_quota):
return NotRateLimited
sys_result, team_result, proj_result = self._incr_project(project)
if proj_quota and proj_result > proj_quota:
return RateLimited(retry_after=self.get_time_remaining())
if team_quota and team_result > team_quota:
return RateLimited(retry_after=self.get_time_remaining())
if system_quota and sys_result > system_quota:
return RateLimited(retry_after=self.get_time_remaining())
return NotRateLimited
def get_time_remaining(self):
return int(self.ttl - (
time.time() - int(time.time() / self.ttl) * self.ttl))
def _get_system_key(self):
return 'quota:s:%s' % (int(time.time() / self.ttl),)
def _get_team_key(self, team):
return 'quota:t:%s:%s' % (team.id, int(time.time() / self.ttl))
def _get_project_key(self, project):
return 'quota:p:%s:%s' % (project.id, int(time.time() / self.ttl))
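    # Illustrative key layout (made-up numbers): with the default ttl of 60 seconds,
    # time.time() == 1000000000 falls into bucket int(1000000000 / 60) == 16666666,
    # so a project with id 42 is counted under 'quota:p:42:16666666'. All counters
    # created during that minute share the bucket suffix and expire together, which
    # is why get_time_remaining() works as a retry_after value.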
def _incr_project(self, project):
if project.team:
team_key = self._get_team_key(project.team)
else:
team_key = None
team_result = None
proj_key = self._get_project_key(project)
sys_key = self._get_system_key()
with self.cluster.map() as client:
proj_result = client.incr(proj_key)
client.expire(proj_key, self.ttl)
sys_result = client.incr(sys_key)
client.expire(sys_key, self.ttl)
if team_key:
team_result = client.incr(team_key)
client.expire(team_key, self.ttl)
return (
int(sys_result.value),
int(team_result and team_result.value or 0),
int(proj_result.value),
)
| bsd-3-clause |
AlexStarov/Shop | applications/delivery/migrations/0046_auto_20151120_1304.py | 1 | 3101 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import applications.delivery.models
import datetime
import compat.ImageWithThumbs.models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('delivery', '0045_auto_20151120_1301'),
]
operations = [
migrations.CreateModel(
name='Email_Img',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField(db_index=True)),
('name', models.CharField(max_length=256, null=True, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438', blank=True)),
('tag_name', models.CharField(help_text='TAG \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u0435 \u043c\u043e\u0436\u0435\u0442 \u0431\u044b\u0442\u044c \u0434\u043b\u0438\u043d\u0435\u0435 8 \u0441\u0438\u043c\u0432\u043e\u043b\u043e\u0432, \u0442\u043e\u043b\u044c\u043a\u043e \u0430\u043d\u0433\u043b\u0438\u0439\u0441\u043a\u0438\u0435 \u043c\u0430\u043b\u0435\u043d\u044c\u043a\u0438\u0435 \u0431\u0443\u043a\u0432\u044b \u0438 \u0446\u0438\u0444\u0440\u044b \u0431\u0435\u0437 \u043f\u0440\u043e\u0431\u0435\u043b\u043e\u0432 \u0438 \u043f\u043e\u0434\u0447\u0435\u0440\u043a\u0438\u0432\u0430\u043d\u0438\u0439', max_length=8, null=True, verbose_name="\u0418\u043c\u044f tag'a \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0438", blank=True)),
('image', compat.ImageWithThumbs.models.ImageWithThumbsField(upload_to=applications.delivery.models.set_path_img)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('content_type', models.ForeignKey(related_name='related_Email_Img', to='contenttypes.ContentType')),
],
options={
'ordering': ['-created_at'],
'db_table': 'EMail_Img',
'verbose_name': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0430 \u0434\u043b\u044f E-Mail',
'verbose_name_plural': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438\u0438 \u0434\u043b\u044f E-Mail',
},
),
migrations.AlterField(
model_name='delivery',
name='name',
field=models.CharField(default=b'2015-11-20T13:04:50.830821', max_length=128, null=True, verbose_name='\u0418\u043c\u044f \u0440\u0430\u0441\u0441\u044b\u043b\u043a\u0438', blank=True),
),
migrations.AlterField(
model_name='mailaccount',
name='auto_active_datetime',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 20, 13, 4, 50, 825134), verbose_name='\u0414\u0430\u0442\u0430 \u0437\u0430\u043a\u0440\u044b\u0442\u0438\u044f \u0430\u043a\u043a\u0430\u0443\u043d\u0442\u0430'),
),
]
| apache-2.0 |
edxzw/edx-platform | common/djangoapps/student/management/commands/get_grades.py | 165 | 5207 | """
Management command to generate a list of grades for
all students that are enrolled in a course.
"""
from courseware import grades, courses
from certificates.models import GeneratedCertificate
from django.test.client import RequestFactory
from django.core.management.base import BaseCommand, CommandError
import os
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.contrib.auth.models import User
from optparse import make_option
import datetime
from django.core.handlers.base import BaseHandler
import csv
class RequestMock(RequestFactory):
def request(self, **request):
"Construct a generic request object."
request = RequestFactory.request(self, **request)
handler = BaseHandler()
handler.load_middleware()
for middleware_method in handler._request_middleware:
if middleware_method(request):
raise Exception("Couldn't create request mock object - "
"request middleware returned a response")
return request
class Command(BaseCommand):
help = """
Generate a list of grades for all students
that are enrolled in a course.
CSV will include the following:
- username
- email
- grade in the certificate table if it exists
- computed grade
- grade breakdown
Outputs grades to a csv file.
Example:
sudo -u www-data SERVICE_VARIANT=lms /opt/edx/bin/django-admin.py get_grades \
-c MITx/Chi6.00intro/A_Taste_of_Python_Programming -o /tmp/20130813-6.00x.csv \
--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform
"""
option_list = BaseCommand.option_list + (
make_option('-c', '--course',
metavar='COURSE_ID',
dest='course',
default=False,
help='Course ID for grade distribution'),
make_option('-o', '--output',
metavar='FILE',
dest='output',
default=False,
help='Filename for grade output'))
def handle(self, *args, **options):
if os.path.exists(options['output']):
raise CommandError("File {0} already exists".format(
options['output']))
STATUS_INTERVAL = 100
# parse out the course into a coursekey
if options['course']:
try:
course_key = CourseKey.from_string(options['course'])
# if it's not a new-style course key, parse it from an old-style
# course key
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(options['course'])
print "Fetching enrolled students for {0}".format(course_key)
enrolled_students = User.objects.filter(
courseenrollment__course_id=course_key
)
factory = RequestMock()
request = factory.get('/')
total = enrolled_students.count()
print "Total enrolled: {0}".format(total)
course = courses.get_course_by_id(course_key)
total = enrolled_students.count()
start = datetime.datetime.now()
rows = []
header = None
print "Fetching certificate data"
cert_grades = {
cert.user.username: cert.grade
for cert in list(
GeneratedCertificate.objects.filter(course_id=course_key).prefetch_related('user')
)
}
print "Grading students"
for count, student in enumerate(enrolled_students):
count += 1
if count % STATUS_INTERVAL == 0:
# Print a status update with an approximation of
# how much time is left based on how long the last
# interval took
diff = datetime.datetime.now() - start
timeleft = diff * (total - count) / STATUS_INTERVAL
hours, remainder = divmod(timeleft.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print "{0}/{1} completed ~{2:02}:{3:02}m remaining".format(
count, total, hours, minutes)
start = datetime.datetime.now()
request.user = student
grade = grades.grade(student, request, course)
if not header:
header = [section['label'] for section in grade[u'section_breakdown']]
rows.append(["email", "username", "certificate-grade", "grade"] + header)
percents = {section['label']: section['percent'] for section in grade[u'section_breakdown']}
row_percents = [percents[label] for label in header]
if student.username in cert_grades:
rows.append([student.email, student.username, cert_grades[student.username], grade['percent']] + row_percents)
else:
rows.append([student.email, student.username, "N/A", grade['percent']] + row_percents)
with open(options['output'], 'wb') as f:
writer = csv.writer(f)
writer.writerows(rows)
| agpl-3.0 |
ivaano/zato | code/zato-cli/src/zato/cli/zato_command.py | 6 | 11613 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import argparse
# ConcurrentLogHandler - updates stdlib's logging config on import so this needs to stay
import cloghandler
cloghandler = cloghandler # For pyflakes
# Zato
from zato.cli import ca_create_ca as ca_create_ca_mod, ca_create_lb_agent as ca_create_lb_agent_mod, \
ca_create_server as ca_create_server_mod, ca_create_web_admin as ca_create_web_admin_mod, \
check_config as check_config_mod, component_version as component_version_mod, create_cluster as create_cluster_mod, \
create_lb as create_lb_mod, create_odb as create_odb_mod, create_server as create_server_mod, \
create_web_admin as create_web_admin_mod, crypto as crypto_mod, delete_odb as delete_odb_mod, \
enmasse as enmasse_mod, FromConfig, info as info_mod, migrate as migrate_mod, quickstart as quickstart_mod, run_command, \
service as service_mod, start as start_mod, stop as stop_mod, web_admin_auth as web_admin_auth_mod
from zato.common import version
def add_opts(parser, opts):
""" Adds parser-specific options.
"""
for opt in opts:
arguments = {}
for name in('help', 'action', 'default'):
try:
arguments[name] = opt[name]
except KeyError:
# Almost no command uses 'action' or 'default' parameters
pass
parser.add_argument(opt['name'], **arguments)
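# Illustrative only (not an actual Zato command's option list): each element of
# `opts` is a dict with at least a 'name' key and usually a 'help' key, e.g.
#
#   example_opts = [
#       {'name': '--odb_type', 'help': 'ODB type'},
#       {'name': '--verbose_output', 'help': 'Verbose output', 'action': 'store_true'},
#   ]
#
# add_opts(parser, example_opts) registers both arguments on the given parser.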
def get_parser():
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('--store-log', help='Whether to store an execution log', action='store_true')
base_parser.add_argument('--verbose', help='Show verbose output', action='store_true')
base_parser.add_argument(
'--store-config',
help='Whether to store config options in a file for a later use', action='store_true')
parser = argparse.ArgumentParser(prog='zato')
parser.add_argument('--version', action='version', version=version)
subs = parser.add_subparsers()
#
# ca
#
ca = subs.add_parser('ca', description='Basic certificate authority (CA) management')
ca_subs = ca.add_subparsers()
ca_create = ca_subs.add_parser('create', description='Creates crypto material for Zato components')
ca_create_subs = ca_create.add_subparsers()
ca_create_ca = ca_create_subs.add_parser('ca', description=ca_create_ca_mod.Create.__doc__, parents=[base_parser])
ca_create_ca.set_defaults(command='ca_create_ca')
ca_create_ca.add_argument('path', help='Path to an empty directory to hold the CA')
add_opts(ca_create_ca, ca_create_ca_mod.Create.opts)
ca_create_lb_agent = ca_create_subs.add_parser('lb_agent', description=ca_create_lb_agent_mod.Create.__doc__, parents=[base_parser])
ca_create_lb_agent.set_defaults(command='ca_create_lb_agent')
ca_create_lb_agent.add_argument('path', help='Path to a CA directory')
add_opts(ca_create_lb_agent, ca_create_lb_agent_mod.Create.opts)
ca_create_server = ca_create_subs.add_parser('server', description=ca_create_server_mod.Create.__doc__, parents=[base_parser])
ca_create_server.set_defaults(command='ca_create_server')
ca_create_server.add_argument('path', help='Path to a CA directory')
add_opts(ca_create_server, ca_create_server_mod.Create.opts)
ca_create_web_admin = ca_create_subs.add_parser('web_admin', description=ca_create_web_admin_mod.Create.__doc__, parents=[base_parser])
ca_create_web_admin.set_defaults(command='ca_create_web_admin')
ca_create_web_admin.add_argument('path', help='Path to a CA directory')
add_opts(ca_create_web_admin, ca_create_web_admin_mod.Create.opts)
#
# check-config
#
check_config = subs.add_parser(
'check-config',
description='Checks config of a Zato component (currently limited to servers only)',
parents=[base_parser])
check_config.set_defaults(command='check_config')
check_config.add_argument('path', help='Path to a Zato component')
add_opts(check_config, check_config_mod.CheckConfig.opts)
#
# component-version
#
component_version = subs.add_parser(
'component-version',
description='Shows the version of a Zato component installed in a given directory',
parents=[base_parser])
component_version.set_defaults(command='component_version')
component_version.add_argument('path', help='Path to a Zato component')
add_opts(component_version, component_version_mod.ComponentVersion.opts)
#
# create
#
create = subs.add_parser('create', description='Creates new Zato components')
create_subs = create.add_subparsers()
create_cluster = create_subs.add_parser('cluster', description=create_cluster_mod.Create.__doc__, parents=[base_parser])
create_cluster.set_defaults(command='create_cluster')
add_opts(create_cluster, create_cluster_mod.Create.opts)
create_lb = create_subs.add_parser('load_balancer', description=create_lb_mod.Create.__doc__, parents=[base_parser])
create_lb.add_argument('path', help='Path to an empty directory to install the load-balancer in')
create_lb.set_defaults(command='create_lb')
add_opts(create_lb, create_lb_mod.Create.opts)
create_odb = create_subs.add_parser('odb', description=create_odb_mod.Create.__doc__, parents=[base_parser])
create_odb.set_defaults(command='create_odb')
add_opts(create_odb, create_odb_mod.Create.opts)
create_server = create_subs.add_parser('server', description=create_server_mod.Create.__doc__, parents=[base_parser])
create_server.add_argument('path', help='Path to an empty directory to install the server in')
create_server.set_defaults(command='create_server')
add_opts(create_server, create_server_mod.Create.opts)
create_user = create_subs.add_parser('user', description=web_admin_auth_mod.CreateUser.__doc__, parents=[base_parser])
create_user.add_argument('path', help='Path to a web admin')
create_user.set_defaults(command='create_user')
add_opts(create_user, web_admin_auth_mod.CreateUser.opts)
create_web_admin = create_subs.add_parser('web_admin', description=create_web_admin_mod.Create.__doc__, parents=[base_parser])
create_web_admin.add_argument('path', help='Path to an empty directory to install a new web admin in')
create_web_admin.set_defaults(command='create_web_admin')
add_opts(create_web_admin, create_web_admin_mod.Create.opts)
#
# decrypt
#
decrypt = subs.add_parser('decrypt', description=crypto_mod.Decrypt.__doc__, parents=[base_parser])
decrypt.add_argument('path', help='Path to the private key in PEM')
decrypt.set_defaults(command='decrypt')
add_opts(decrypt, crypto_mod.Decrypt.opts)
#
# delete
#
delete = subs.add_parser('delete', description=delete_odb_mod.Delete.__doc__)
delete_subs = delete.add_subparsers()
delete_odb = delete_subs.add_parser('odb', description='Deletes a Zato ODB', parents=[base_parser])
delete_odb.set_defaults(command='delete_odb')
add_opts(delete_odb, delete_odb_mod.Delete.opts)
#
# encrypt
#
encrypt = subs.add_parser('encrypt', description=crypto_mod.Encrypt.__doc__, parents=[base_parser])
encrypt.add_argument('path', help='Path to the public key in PEM')
encrypt.set_defaults(command='encrypt')
add_opts(encrypt, crypto_mod.Encrypt.opts)
#
# enmasse
#
enmasse = subs.add_parser('enmasse', description=enmasse_mod.EnMasse.__doc__, parents=[base_parser])
enmasse.add_argument('path', help='Path to a running Zato server')
enmasse.set_defaults(command='enmasse')
add_opts(enmasse, enmasse_mod.EnMasse.opts)
#
# info
#
info = subs.add_parser('info', description=info_mod.Info.__doc__, parents=[base_parser])
info.add_argument('path', help='Path to a Zato component')
info.set_defaults(command='info')
add_opts(info, info_mod.Info.opts)
#
# from-config-file
#
from_config = subs.add_parser('from-config', description=FromConfig.__doc__, parents=[base_parser])
from_config.add_argument('path', help='Path to a Zato command config file')
from_config.set_defaults(command='from_config')
#
# migrate
#
migrate = subs.add_parser('migrate', description=migrate_mod.Migrate.__doc__, parents=[base_parser])
migrate.add_argument('path', help='Path to a Zato component')
migrate.set_defaults(command='migrate')
add_opts(migrate, migrate_mod.Migrate.opts)
#
# quickstart
#
quickstart = subs.add_parser('quickstart', description='Quickly set up and manage Zato clusters', parents=[base_parser])
quickstart_subs = quickstart.add_subparsers()
quickstart_create = quickstart_subs.add_parser('create', description=quickstart_mod.Create.__doc__, parents=[base_parser])
quickstart_create.add_argument('path', help='Path to an empty directory for the quickstart cluster')
quickstart_create.set_defaults(command='quickstart_create')
add_opts(quickstart_create, quickstart_mod.Create.opts)
#
# service
#
service = subs.add_parser('service', description='Commands related to the management of Zato services')
service_subs = service.add_subparsers()
service_invoke = service_subs.add_parser('invoke', description=service_mod.Invoke.__doc__, parents=[base_parser])
service_invoke.set_defaults(command='service_invoke')
add_opts(service_invoke, service_mod.Invoke.opts)
#
# start
#
start = subs.add_parser('start', description=start_mod.Start.__doc__, parents=[base_parser], formatter_class=argparse.RawDescriptionHelpFormatter)
start.add_argument('path', help='Path to the Zato component to be started')
start.set_defaults(command='start')
add_opts(start, start_mod.Start.opts)
#
# stop
#
stop = subs.add_parser('stop', description=stop_mod.Stop.__doc__, parents=[base_parser])
stop.add_argument('path', help='Path to the Zato component to be stopped')
stop.set_defaults(command='stop')
#
# update
#
update = subs.add_parser('update', description='Updates Zato components and users')
update_subs = update.add_subparsers()
# .. update crypto
update_crypto = update_subs.add_parser('crypto', description=crypto_mod.UpdateCrypto.__doc__, parents=[base_parser])
update_crypto.add_argument('path', help='Path to a Zato component')
update_crypto.set_defaults(command='update_crypto')
add_opts(update_crypto, crypto_mod.UpdateCrypto.opts)
# .. update password
update_password = update_subs.add_parser('password', description=web_admin_auth_mod.UpdatePassword.__doc__, parents=[base_parser])
update_password.add_argument('path', help='Path to a web admin directory')
update_password.set_defaults(command='update_password')
add_opts(update_password, web_admin_auth_mod.UpdatePassword.opts)
# .. update password
update_open_id = update_subs.add_parser('openid', description=web_admin_auth_mod.UpdateOpenID.__doc__, parents=[base_parser])
update_open_id.add_argument('path', help='Path to a web admin directory')
update_open_id.set_defaults(command='update_openid')
add_opts(update_open_id, web_admin_auth_mod.UpdateOpenID.opts)
return parser
def main():
return run_command(get_parser().parse_args())
| gpl-3.0 |
cancan101/tensorflow | tensorflow/python/ops/candidate_sampling_ops.py | 55 | 17372 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for candidate sampling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_candidate_sampling_ops
from tensorflow.python.ops import math_ops
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a uniform base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max)`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is the uniform distribution
over the range of integers `[0, range_max)`.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._uniform_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
range_max, seed=None, name=None):
"""Samples a set of classes using a log-uniform (Zipfian) base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max)`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is an approximately log-uniform
or Zipfian distribution:
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
This sampler is useful when the target classes approximately follow such
a distribution - for example, if the classes represent words in a lexicon
sorted in decreasing order of frequency. If your classes are not ordered by
decreasing frequency, do not use this op.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._log_uniform_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
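# A minimal usage sketch (assumed tensors, not part of this module): drawing 64
# unique negative classes per batch from a 10,000-class vocabulary, e.g. for a
# sampled-softmax style loss.
#
#   true_ids = tf.constant([[12], [7]], dtype=tf.int64)  # [batch_size, num_true]
#   sampled, true_expected, sampled_expected = log_uniform_candidate_sampler(
#       true_classes=true_ids, num_true=1, num_sampled=64,
#       unique=True, range_max=10000)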
def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled,
unique, range_max, seed=None, name=None):
"""Samples a set of classes from a distribution learned during training.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max)`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution for this operation is constructed on the fly
during training. It is a unigram distribution over the target
classes seen so far during training. Every integer in `[0, range_max)`
begins with a weight of 1, and is incremented by 1 each time it is
seen as a target class. The base distribution is not saved to checkpoints,
so it is reset when the model is reloaded.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._learned_unigram_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max, seed=seed1,
seed2=seed2, name=name)
def fixed_unigram_candidate_sampler(true_classes,
num_true,
num_sampled,
unique,
range_max,
vocab_file='',
distortion=1.0,
num_reserved_ids=0,
num_shards=1,
shard=0,
unigrams=(),
seed=None,
name=None):
"""Samples a set of classes using the provided (fixed) base distribution.
This operation randomly samples a tensor of sampled classes
(`sampled_candidates`) from the range of integers `[0, range_max)`.
The elements of `sampled_candidates` are drawn without replacement
(if `unique=True`) or with replacement (if `unique=False`) from
the base distribution.
The base distribution is read from a file or passed in as an
in-memory array. There is also an option to skew the distribution by
applying a distortion power to the weights.
In addition, this operation returns tensors `true_expected_count`
and `sampled_expected_count` representing the number of times each
of the target classes (`true_classes`) and the sampled
classes (`sampled_candidates`) is expected to occur in an average
tensor of sampled classes. These values correspond to `Q(y|x)`
defined in [this
document](http://www.tensorflow.org/extras/candidate_sampling.pdf).
If `unique=True`, then these are post-rejection probabilities and we
compute them approximately.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
unique: A `bool`. Determines whether all sampled classes in a batch are
unique.
range_max: An `int`. The number of possible classes.
vocab_file: Each valid line in this file (which should have a CSV-like
format) corresponds to a valid word ID. IDs are in sequential order,
starting from num_reserved_ids. The last entry in each line is expected
to be a value corresponding to the count or relative probability. Exactly
one of `vocab_file` and `unigrams` needs to be passed to this operation.
distortion: The distortion is used to skew the unigram probability
distribution. Each weight is first raised to the distortion's power
before adding to the internal unigram distribution. As a result,
`distortion = 1.0` gives regular unigram sampling (as defined by the vocab
file), and `distortion = 0.0` gives a uniform distribution.
num_reserved_ids: Optionally some reserved IDs can be added in the range
`[0, num_reserved_ids]` by the users. One use case is that a special
unknown word token is used as ID 0. These IDs will have a sampling
probability of 0.
num_shards: A sampler can be used to sample from a subset of the original
range in order to speed up the whole computation through parallelism. This
parameter (together with `shard`) indicates the number of partitions that
are being used in the overall computation.
shard: A sampler can be used to sample from a subset of the original range
in order to speed up the whole computation through parallelism. This
parameter (together with `num_shards`) indicates the particular partition
number of the operation, when partitioning is being used.
unigrams: A list of unigram counts or probabilities, one per ID in
sequential order. Exactly one of `vocab_file` and `unigrams` should be
passed to this operation.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled classes.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._fixed_unigram_candidate_sampler(
true_classes, num_true, num_sampled, unique, range_max,
vocab_file=vocab_file, distortion=distortion,
num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
unigrams=unigrams, seed=seed1, seed2=seed2, name=name)
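# A numeric illustration of `distortion` (made-up counts): unigram counts
# [100, 10, 1] raised to the power 0.5 become [10.0, ~3.16, 1.0], which flattens
# the distribution; distortion=1.0 keeps the counts as given, and distortion=0.0
# makes every class equally likely.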
def all_candidate_sampler(true_classes, num_true, num_sampled, unique,
seed=None, name=None):
"""Generate the set of all classes.
Deterministically generates and returns the set of all possible classes.
For testing purposes. There is no need to use this, since you might as
well use full softmax or full logistic regression.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of possible classes.
    unique: A `bool`. Ignored.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
This operation deterministically returns the entire range
`[0, num_sampled]`.
true_expected_count: A tensor of type `float`. Same shape as
`true_classes`. The expected counts under the sampling distribution
of each of `true_classes`. All returned values are 1.0.
sampled_expected_count: A tensor of type `float`. Same shape as
`sampled_candidates`. The expected counts under the sampling distribution
of each of `sampled_candidates`. All returned values are 1.0.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._all_candidate_sampler(
true_classes, num_true, num_sampled, unique, seed=seed1, seed2=seed2,
name=name)
def compute_accidental_hits(true_classes, sampled_candidates, num_true,
seed=None, name=None):
"""Compute the position ids in `sampled_candidates` matching `true_classes`.
In Candidate Sampling, this operation facilitates virtually removing
sampled classes which happen to match target classes. This is done
in Sampled Softmax and Sampled Logistic.
See our [Candidate Sampling Algorithms
Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf).
We presuppose that the `sampled_candidates` are unique.
We call it an 'accidental hit' when one of the target classes
matches one of the sampled classes. This operation reports
accidental hits as triples `(index, id, weight)`, where `index`
represents the row number in `true_classes`, `id` represents the
position in `sampled_candidates`, and weight is `-FLOAT_MAX`.
The result of this op should be passed through a `sparse_to_dense`
operation, then added to the logits of the sampled classes. This
removes the contradictory effect of accidentally sampling the true
target classes as noise classes for the same example.
Args:
true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled_candidates output of CandidateSampler.
num_true: An `int`. The number of target classes per training example.
seed: An `int`. An operation-specific seed. Default is 0.
name: A name for the operation (optional).
Returns:
indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`.
Values indicate rows in `true_classes`.
ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`.
Values indicate positions in `sampled_candidates`.
weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`.
Each value is `-FLOAT_MAX`.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_candidate_sampling_ops._compute_accidental_hits(
true_classes, sampled_candidates, num_true, seed=seed1, seed2=seed2,
name=name)
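# A hedged sketch of how these outputs are typically consumed (names assumed, not
# part of this module): scatter `weights` (each -FLOAT_MAX) into the sampled-logits
# matrix so that accidentally sampled true classes cannot win the softmax.
#
#   indices, ids, weights = compute_accidental_hits(true_classes, sampled, num_true=1)
#   # build [row, column] index pairs from (indices, ids) and add `weights` onto
#   # sampled_logits via a sparse_to_dense scatter, as described in the docstring.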
| apache-2.0 |
iancze/EchelleJSON | EchelleJSON.py | 1 | 2136 | import numpy as np
import json
from astropy.utils.misc import JsonCustomEncoder
order_prefix = "order_"
def read(fname):
'''
A simple routine to read a JSON format and return dictionaries of numpy arrays.
'''
f = open(fname, 'r')
echelle_dict = json.load(f)
f.close()
# go through each of the orders and convert these from lists to numpy arrays
order_keys = [key for key in echelle_dict.keys() if "order_" in key]
for key in order_keys:
order = echelle_dict[key]
echelle_dict[key]["wl"] = np.array(order["wl"])
echelle_dict[key]["fl"] = np.array(order["fl"])
# If it has sigma, convert it too
if "sigma" in order.keys():
echelle_dict[key]["sigma"] = np.array(order["sigma"])
return echelle_dict
def write(fname, echelle_dict):
'''
A simple routine to turn dictionaries of numpy arrays into JSON format.
'''
# check that echelle fields are labeled with `order_{name}` (there must be at least one)
assert type(echelle_dict) is dict, '''You must pass in a dictionary... \n
echelle_dict = {"order_00":{"wl": [5003.3, ...], "fl": [0.01, ...]}}'''
# check that each order has at least wl and fl, and that they are the same length
order_keys = [key for key in echelle_dict.keys() if "order_" in key]
for key in order_keys:
order = echelle_dict[key]
assert "wl" in order.keys(), "Must contain wl as a key."
assert "fl" in order.keys(), "Must contain fl as a key."
# Assert wl is strictly increasing and that there are no duplicate wl pixels
wl = order["wl"]
assert np.all(np.diff(wl) > 0), "wl for {} must be strictly increasing and contain no duplicate pixels.".format(key)
assert len(wl) == len(order["fl"]), "wl and fl must be the same length 1-D arrays."
f = open(fname, 'w')
# json.dump(echelle_dict, f, cls=encoder)
json.dump(echelle_dict, f, cls=JsonCustomEncoder, sort_keys=True, indent=2)
f.close()
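# A minimal usage sketch (hypothetical file name and values):
#
#   spectrum = {"order_00": {"wl": np.array([5003.3, 5003.4]),
#                            "fl": np.array([0.91, 0.93])}}
#   write("spectrum.json", spectrum)
#   data = read("spectrum.json")  # per-order wl/fl come back as numpy arrays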
# Assume that we are using the module as a command line script.
def main():
pass
if __name__=="__main__":
main()
| mit |
diegocortassa/TACTIC | src/tactic/command/notification_cmd.py | 1 | 3344 | ############################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = ['NotificationTestCmd']
from pyasm.common import Container, Environment, TacticException
from pyasm.command import Command, Trigger
from pyasm.search import SearchKey, SearchType
from pyasm.biz import Project
class NotificationTestCmd(Command):
'''Do a dry-run of a notification sending'''
def get_base_search_type(self):
return ''
def get_prev_value(self):
return ''
def execute(self):
notification = self.kwargs.get('sobject_dict')
search_key = notification.get('__search_key__')
event = notification.get('event')
parts = event.split('|')
if len(parts) < 2:
raise TacticException('event should be in the form of {action}|{search_type}, e.g. update|sthpw/task or update|sthpw/task|status')
orig_search_type = parts[1]
search_type_obj = SearchType.get(orig_search_type)
sobject = SearchType.create(orig_search_type)
self.sobjects.append(sobject)
search_type = SearchKey.extract_search_type(search_key)
search_id = notification.get('id')
columns = search_type_obj.get_columns(orig_search_type)
for column in columns:
type = search_type_obj.get_column_type(orig_search_type,column)
if column == 'search_type':
value = search_type
elif column == 'search_id':
value = search_id
elif column == 'project_code':
value = Project.get_project_code()
elif column in ['assigned', 'login']:
value = Environment.get_user_name()
elif type in ['integer','float','number']:
value = 100
elif type == 'timestamp':
value = '20110101'
else:
value = '%s_test'%column
try:
sobject.set_value(column, value)
except:
continue
notification_stype = notification.get('search_type')
if notification_stype:
sobject.set_value("search_type", notification_stype)
# some dummy output
output = {'id': sobject.get_id()}
notification_process = notification.get('process')
if notification_process:
if 'process' in columns:
sobject.set_value("process", notification_process)
try:
triggers = Trigger.call(sobject, event, output=output, forced_mode='same process,same transaction', process = notification_process, search_type = notification_stype)
if triggers:
for idx, trigger in enumerate(triggers):
self.add_description('%s. %s' %(idx+1, trigger.get_description()))
else:
raise TacticException('No notification trigger is fired. Possible mismatched project_code for this notification entry.')
        except Exception, e:
            raise
| epl-1.0 |
valdecdev/odoo | openerp/report/render/rml2pdf/utils.py | 48 | 6141 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile('\[\[(.+?)\]\]')
def str2xml(s):
    return (s or '').replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def xml2str(s):
    return (s or '').replace('&amp;','&').replace('&lt;','<').replace('&gt;','>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.info('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.info('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.info('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.info('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
def _process_text(self, txt):
"""Translate ``txt`` according to the language in the local context,
replace dynamic ``[[expr]]`` with their real value, then escape
the result for XML.
:param str txt: original text to translate (must NOT be XML-escaped)
:return: translated text, with dynamic expressions evaluated and
with special XML characters escaped (``&,<,>``).
"""
if not self.localcontext:
return str2xml(txt)
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
to_translate = tools.ustr(sps.pop(0))
result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
if sps:
txt = None
try:
expr = sps.pop(0)
txt = eval(expr, self.localcontext)
if txt and isinstance(txt, basestring):
txt = tools.ustr(txt)
except Exception:
_logger.info("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
if isinstance(txt, basestring):
result += txt
elif txt and (txt is not None) and (txt is not False):
result += ustr(txt)
return str2xml(result)
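# A hedged example of the substitution above (hypothetical context): with
# localcontext = {'o': some_record, 'translate': <callable>}, the template string
# "Total: [[ o.amount_total ]]" is split by _regex, the literal part goes through
# the 'translate' callable, the expression is evaluated against localcontext, and
# the concatenated result is XML-escaped by str2xml().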
def text_get(node):
return ''.join([ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
if size.find('.') == -1:
decimal_point = '.'
try:
decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
except Exception:
decimal_point = locale.localeconv()['decimal_point']
size = size.replace(decimal_point, '.')
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = tools.ustr(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
elif dict[key] == 'float' :
res[key] = float(node.get(key))
return res
| agpl-3.0 |
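The unit_get helper above maps an RML size string to points by trying a small table of unit-specific regexes and multiplying the captured number by the matching reportlab constant. A minimal, self-contained sketch of the same idea follows; the point-per-unit factors are hard-coded (1 in = 72 pt) so it runs without reportlab, and the function name size_to_points is hypothetical.

import re

_UNIT_TABLE = [
    (re.compile(r'^(-?[0-9.]+)\s*in$'), 72.0),         # inches to points
    (re.compile(r'^(-?[0-9.]+)\s*cm$'), 72.0 / 2.54),  # centimetres to points
    (re.compile(r'^(-?[0-9.]+)\s*mm$'), 72.0 / 25.4),  # millimetres to points
    (re.compile(r'^(-?[0-9.]+)\s*$'), 1.0),            # bare numbers pass through unchanged
]

def size_to_points(size):
    """Return the size in points, or None if the string does not parse."""
    for pattern, factor in _UNIT_TABLE:
        match = pattern.search(size.strip())
        if match:
            return factor * float(match.group(1))
    return None

print(size_to_points('2.5cm'))   # ~70.87
print(size_to_points('10 mm'))   # ~28.35
print(size_to_points('36'))      # 36.0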
ooici/pyon | pyon/util/container_stats_plugin.py | 1 | 1723 | """
This plugin requests that the container log all collected statistics
Add this command to the way you execute nose::
--with-stats
Each container will handle the request, so if running --with-pycc,
then statistics will appear in both container.log and pycc-container.log files.
Otherwise, container.log will show statistics.
"""
import time
import nose
from nose.plugins.base import Plugin
import subprocess
class TestStats(Plugin):
name = 'stats'
score = 1
def report(self, stream):
""" all tests have completed but --with-pycc has not yet stopped external container.
request that containers log statistics now
"""
# initialize pyon so we can get system name
from pyon.core import bootstrap
if not bootstrap.pyon_initialized:
bootstrap.bootstrap_pyon()
from pyon.public import get_sys_name, CFG
# make request: bin/pycc --sysname mgmt -x ion.processes.test.manage_system.ReportStats
null = open('/dev/null', 'w')
cmd = ['bin/pycc', '--sysname', get_sys_name(), '-x', 'ion.processes.test.manage_system.ReportStats' ]
status = subprocess.call(cmd, stdout=null, stderr=null)
if status==0:
stream.write('container statistics: a report request has been sent\n')
time.sleep(5) # give time to handle before container shutdown begins
else:
stream.write('container statistics: failed to send report request (logging anyway -- who needs a container?)\n')
from ooi.timer import get_accumulators
for a in get_accumulators().values():
a.log()
if __name__ == '__main__':
nose.main(addplugins=[TestStats()])
| bsd-2-clause |
mohseniaref/adore-doris | lib/python/basic/projections/ecef.py | 2 | 1932 | #ecef.py
#https://code.google.com/p/pysatel/source/browse/trunk/coord.py?r=22
from math import pow, degrees, radians
from scipy import mat, cos, sin, arctan, sqrt, pi, arctan2, deg2rad, rad2deg
from scipy.special import cbrt  # cbrt is used by ecef2geodetic below but was not imported
#TO-DO: UPDATE THESE NUMBERS USING THE earth_radius.py
#
# Constants defined by the World Geodetic System 1984 (WGS84)
a = 6378.137
b = 6356.7523142
esq = 6.69437999014 * 0.001
e1sq = 6.73949674228 * 0.001
f = 1 / 298.257223563
def geodetic2ecef(lat, lon, alt, degrees=True):
"""geodetic2ecef(lat, lon, alt)
[deg][deg][m]
Convert geodetic coordinates to ECEF."""
if degrees:
lat=deg2rad(lat)
lon=deg2rad(lon)
#lat, lon = radians(lat), radians(lon)
    xi = sqrt(1 - esq * sin(lat) ** 2)  # prime vertical radius term; sin(lat) must be squared
x = (a / xi + alt) * cos(lat) * cos(lon)
y = (a / xi + alt) * cos(lat) * sin(lon)
z = (a / xi * (1 - esq) + alt) * sin(lat)
return x, y, z
def ecef2geodetic(x, y, z, degrees=True):
"""ecef2geodetic(x, y, z)
[m][m][m]
Convert ECEF coordinates to geodetic.
J. Zhu, "Conversion of Earth-centered Earth-fixed coordinates \
to geodetic coordinates," IEEE Transactions on Aerospace and \
Electronic Systems, vol. 30, pp. 957-961, 1994."""
r = sqrt(x * x + y * y)
Esq = a * a - b * b
F = 54 * b * b * z * z
G = r * r + (1 - esq) * z * z - esq * Esq
C = (esq * esq * F * r * r) / (pow(G, 3))
S = cbrt(1 + C + sqrt(C * C + 2 * C))
P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)
Q = sqrt(1 + 2 * esq * esq * P)
r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - \
P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)
U = sqrt(pow((r - esq * r_0), 2) + z * z)
V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)
Z_0 = b * b * z / (a * V)
h = U * (1 - b * b / (a * V))
lat = arctan((z + e1sq * Z_0) / r)
lon = arctan2(y, x)
    return rad2deg(lat), rad2deg(lon), h  # return the computed altitude, not the input z
| gpl-2.0 |
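For reference, the standard WGS84 geodetic-to-ECEF conversion that geodetic2ecef above implements (in kilometres) can be cross-checked with a few lines of standard-library Python working in metres. The constant values are WGS84; the function name is illustrative rather than part of the module above.

from math import radians, sin, cos, sqrt

A = 6378137.0              # WGS84 semi-major axis [m]
E2 = 6.69437999014e-3      # WGS84 first eccentricity squared

def geodetic_to_ecef_m(lat_deg, lon_deg, alt_m):
    lat, lon = radians(lat_deg), radians(lon_deg)
    n = A / sqrt(1.0 - E2 * sin(lat) ** 2)   # prime vertical radius of curvature
    x = (n + alt_m) * cos(lat) * cos(lon)
    y = (n + alt_m) * cos(lat) * sin(lon)
    z = (n * (1.0 - E2) + alt_m) * sin(lat)
    return x, y, z

# A point on the equator at the prime meridian lies on the semi-major axis:
print(geodetic_to_ecef_m(0.0, 0.0, 0.0))   # (6378137.0, 0.0, 0.0)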
amenonsen/ansible | test/units/modules/storage/netapp/test_na_ontap_vserver_cifs_security.py | 21 | 5961 | # (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vserver_cifs_security \
import NetAppONTAPCifsSecurity as cifs_security_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'cifs_security':
xml = self.build_port_info(self.data)
if self.type == 'error':
error = netapp_utils.zapi.NaApiError('test', 'error')
raise error
self.xml_out = xml
return xml
@staticmethod
def build_port_info(cifs_security_details):
''' build xml data for cifs-security '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'cifs-security': {
'is_aes_encryption_enabled': cifs_security_details['is_aes_encryption_enabled'],
'lm_compatibility_level': cifs_security_details['lm_compatibility_level']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_cifs_security = {
'is_aes_encryption_enabled': 'true',
'lm_compatibility_level': 'krb'
}
def mock_args(self):
return {
'is_aes_encryption_enabled': self.mock_cifs_security['is_aes_encryption_enabled'],
'lm_compatibility_level': self.mock_cifs_security['lm_compatibility_level'],
'vserver': 'ansible',
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'https': 'False'
}
def get_cifs_security_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_vserver_cifs_security object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_vserver_cifs_security object
"""
obj = cifs_security_module()
obj.asup_log_for_cserver = Mock(return_value=None)
obj.server = Mock()
obj.server.invoke_successfully = Mock()
if kind is None:
obj.server = MockONTAPConnection()
else:
obj.server = MockONTAPConnection(kind=kind, data=self.mock_cifs_security)
return obj
@patch('ansible.modules.storage.netapp.na_ontap_vserver_cifs_security.NetAppONTAPCifsSecurity.cifs_security_get_iter')
def test_successful_modify(self, get_cifs_security):
        ''' Test successful modify of vserver cifs security settings '''
data = self.mock_args()
set_module_args(data)
current = {
'is_aes_encryption_enabled': False,
'lm_compatibility_level': 'lm_ntlm_ntlmv2_krb'
}
get_cifs_security.side_effect = [
current
]
with pytest.raises(AnsibleExitJson) as exc:
self.get_cifs_security_mock_object('cifs_security').apply()
assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_vserver_cifs_security.NetAppONTAPCifsSecurity.cifs_security_get_iter')
def test_modify_error(self, get_cifs_security):
        ''' Test error handling when the cifs security modify call fails '''
data = self.mock_args()
set_module_args(data)
current = {
'is_aes_encryption_enabled': False
}
get_cifs_security.side_effect = [
current
]
with pytest.raises(AnsibleFailJson) as exc:
self.get_cifs_security_mock_object('error').apply()
assert exc.value.args[0]['msg'] == 'Error modifying cifs security on ansible: NetApp API failed. Reason - test:error'
| gpl-3.0 |
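The test case above follows the common Ansible-module testing pattern of replacing exit_json/fail_json with functions that raise, so the module's terminal call can be caught and its payload asserted on. The pattern in isolation, with purely illustrative class and function names:

from unittest import mock

class ExitCaptured(Exception):
    """Carries the keyword arguments the module tried to report."""

def fake_exit_json(*args, **kwargs):
    kwargs.setdefault('changed', False)
    raise ExitCaptured(kwargs)

class FakeModule(object):
    def exit_json(self, **kwargs):
        raise SystemExit   # the real call would terminate the process
    def apply(self):
        self.exit_json(changed=True, msg='done')

with mock.patch.object(FakeModule, 'exit_json', fake_exit_json):
    try:
        FakeModule().apply()
    except ExitCaptured as exc:
        assert exc.args[0]['changed'] is True
        print('captured:', exc.args[0])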
henriknelson/micropython | tests/pyb/adc.py | 6 | 1529 | from pyb import ADC, Timer
adct = ADC(16) # Temperature 930 -> 20C
print(adct)
adcv = ADC(17) # Voltage 1500 -> 3.3V
print(adcv)
# read single sample; 2.5V-5V is pass range
val = adcv.read()
assert val > 1000 and val < 2000
# timer for read_timed
tim = Timer(5, freq=500)
# read into bytearray
buf = bytearray(b'\xff' * 50)
adcv.read_timed(buf, tim)
print(len(buf))
for i in buf:
assert i > 50 and i < 150
# read into arrays with different element sizes
import array
arv = array.array('h', 25 * [0x7fff])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
assert i > 1000 and i < 2000
arv = array.array('i', 30 * [-1])
adcv.read_timed(arv, tim)
print(len(arv))
for i in arv:
assert i > 1000 and i < 2000
# Test read_timed_multi
arv = bytearray(b'\xff'*50)
art = bytearray(b'\xff'*50)
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 60 and i < 125
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 15 and i < 200
arv = array.array('i', 25 * [-1])
art = array.array('i', 25 * [-1])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 50 and i < 2000
arv = array.array('h', 25 * [0x7fff])
art = array.array('h', 25 * [0x7fff])
ADC.read_timed_multi((adcv, adct), (arv, art), tim)
for i in arv:
assert i > 1000 and i < 2000
# Wide range: unsure of accuracy of temp sensor.
for i in art:
assert i > 50 and i < 2000
| mit |
daoluan/decode-Django | Django-1.5.1/django/contrib/auth/models.py | 74 | 18549 | from __future__ import unicode_literals
import re
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.http import urlquote
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
# UNUSABLE_PASSWORD is still imported here for backwards compatibility
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable, UNUSABLE_PASSWORD)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
@python_2_unicode_compatible
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return "%s | %s | %s" % (
six.text_type(self.content_type.app_label),
six.text_type(self.content_type),
six.text_type(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
def create_user(self, username, email=None, password=None, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = UserManager.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password, **extra_fields):
u = self.create_user(username, email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError()
def get_short_name(self):
raise NotImplementedError()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
if obj is not None:
permissions.update(backend.get_all_permissions(user, obj))
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if obj is not None:
if backend.has_perm(user, perm, obj):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'))
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text='Specific permissions for this user.')
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
permissions.update(backend.get_group_permissions(self,
obj))
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.username)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
warnings.warn("The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
PendingDeprecationWarning)
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable(
'You need to set AUTH_PROFILE_MODULE in your project '
'settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable(
'app_label and model_name should be separated by a dot in '
'the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable(
'Unable to load the profile model, check '
'AUTH_PROFILE_MODULE in your project settings')
self._profile_cache = model._default_manager.using(
self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username, password and email are required. Other fields are optional.
"""
class Meta:
swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
| gpl-2.0 |
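The module-level helpers above (_user_get_all_permissions, _user_has_perm, _user_has_module_perms) do nothing more than iterate the configured authentication backends and call whichever of these optional hooks each backend defines. A minimal custom backend therefore only needs methods shaped as sketched below; the class name and the 'app.view_report' permission string are made up for illustration.

class ReadOnlyBackend(object):
    """Grants every active user one hypothetical permission."""

    def get_all_permissions(self, user_obj, obj=None):
        if not getattr(user_obj, 'is_active', False):
            return set()
        return {'app.view_report'}

    def has_perm(self, user_obj, perm, obj=None):
        return perm in self.get_all_permissions(user_obj, obj)

    def has_module_perms(self, user_obj, app_label):
        return any(p.startswith(app_label + '.')
                   for p in self.get_all_permissions(user_obj))

class _DummyUser(object):
    is_active = True

print(ReadOnlyBackend().has_perm(_DummyUser(), 'app.view_report'))   # True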
alexlib/openpiv-python | openpiv/test/test_tools.py | 2 | 1613 | from openpiv.tools import imread, save, display_vector_field, transform_coordinates
from openpiv.pyprocess import extended_search_area_piv, get_coordinates
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing import compare
file_a = pathlib.Path(__file__).parent / '../examples/test1/exp1_001_a.bmp'
file_b = pathlib.Path(__file__).parent / '../examples/test1/exp1_001_b.bmp'
test_file = pathlib.Path(__file__).parent / 'test_tools.png'
def test_imread(image_file=file_a):
a = imread(image_file)
assert a.shape == (369, 511)
assert a[0, 0] == 8
assert a[-1, -1] == 15
def test_display_vector_field(file_a=file_a, file_b=file_b):
a = imread(file_a)
b = imread(file_b)
window_size = 32
overlap = 16
search_area_size = 40
u, v, s2n = extended_search_area_piv(a, b, window_size,
search_area_size=search_area_size,
overlap=overlap,
correlation_method='circular',
normalized_correlation=False)
x, y = get_coordinates(a.shape, search_area_size=search_area_size,
overlap=overlap)
x, y, u, v = transform_coordinates(x, y, u, v)
save(x, y, u, v,
np.zeros_like(x), 'tmp.txt')
fig, ax = plt.subplots(figsize=(6,6))
display_vector_field('tmp.txt', on_img=True, image_name=file_a, ax=ax)
fig.savefig('./tmp.png')
res = compare.compare_images('./tmp.png', test_file, 0.001)
assert res is None
| gpl-3.0 |
NikolaYolov/invenio_backup | modules/websubmit/lib/websubmitadmin_regression_tests.py | 13 | 2876 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit Admin Regression Test Suite."""
__revision__ = "$Id$"
import unittest
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
class WebSubmitAdminWebPagesAvailabilityTest(unittest.TestCase):
"""Check WebSubmit Admin web pages whether they are up or not."""
def test_websubmit_admin_interface_pages_availability(self):
"""websubmitadmin - availability of WebSubmit Admin interface pages"""
baseurl = CFG_SITE_URL + '/admin/websubmit/websubmitadmin.py/'
_exports = ['', 'showall', 'doctypelist', 'doctypeadd',
'doctyperemove', 'actionlist', 'jschecklist',
'elementlist', 'functionlist']
error_messages = []
for url in [baseurl + page for page in _exports]:
# first try as guest:
error_messages.extend(test_web_page_content(url,
username='guest',
expected_text=
'Authorization failure'))
# then try as admin:
error_messages.extend(test_web_page_content(url,
username='admin'))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_websubmit_admin_guide_availability(self):
"""websubmitadmin - availability of WebSubmit Admin guide pages"""
url = CFG_SITE_URL + '/help/admin/websubmit-admin-guide'
error_messages = test_web_page_content(url,
expected_text="WebSubmit Admin Guide")
if error_messages:
self.fail(merge_error_messages(error_messages))
return
TEST_SUITE = make_test_suite(WebSubmitAdminWebPagesAvailabilityTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 |
ukBaz/python-bluezero | tests/test_dbus_tools.py | 1 | 7790 | import subprocess
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
import tests.obj_data
from bluezero import constants
class TestDbusModuleCalls(unittest.TestCase):
"""
Testing things that use the Dbus module
"""
experimental = True
bluetooth_service_experimental = b'\xe2\x97\x8f bluetooth.service - Bluetooth service\n Loaded: loaded (/lib/systemd/system/bluetooth.service; enabled; vendor preset: enabled)\n Active: active (running) since Fri 2017-10-13 21:48:58 UTC; 1 day 23h ago\n Docs: man:bluetoothd(8)\n Main PID: 530 (bluetoothd)\n Status: "Running"\n CGroup: /system.slice/bluetooth.service\n \xe2\x94\x94\xe2\x94\x80530 /usr/lib/bluetooth/bluetoothd --experimental\n\nOct 13 21:48:58 RPi3 systemd[1]: Starting Bluetooth service...\nOct 13 21:48:58 RPi3 bluetoothd[530]: Bluetooth daemon 5.43\nOct 13 21:48:58 RPi3 systemd[1]: Started Bluetooth service.\nOct 13 21:48:58 RPi3 bluetoothd[530]: Bluetooth management interface 1.14 initialized\nOct 13 21:48:58 RPi3 bluetoothd[530]: Failed to obtain handles for "Service Changed" characteristic\nOct 13 21:48:58 RPi3 bluetoothd[530]: Endpoint registered: sender=:1.10 path=/MediaEndpoint/A2DPSource\nOct 13 21:48:58 RPi3 bluetoothd[530]: Endpoint registered: sender=:1.10 path=/MediaEndpoint/A2DPSink\n'
bluetooth_service_normal = b'\xe2\x97\x8f bluetooth.service - Bluetooth service\n Loaded: loaded (/lib/systemd/system/bluetooth.service; enabled; vendor preset: enabled)\n Active: active (running) since Fri 2017-10-13 21:48:58 UTC; 1 day 23h ago\n Docs: man:bluetoothd(8)\n Main PID: 530 (bluetoothd)\n Status: "Running"\n CGroup: /system.slice/bluetooth.service\n \xe2\x94\x94\xe2\x94\x80530 /usr/lib/bluetooth/bluetoothd\n\nOct 13 21:48:58 RPi3 systemd[1]: Starting Bluetooth service...\nOct 13 21:48:58 RPi3 bluetoothd[530]: Bluetooth daemon 5.43\nOct 13 21:48:58 RPi3 systemd[1]: Started Bluetooth service.\nOct 13 21:48:58 RPi3 bluetoothd[530]: Bluetooth management interface 1.14 initialized\nOct 13 21:48:58 RPi3 bluetoothd[530]: Failed to obtain handles for "Service Changed" characteristic\nOct 13 21:48:58 RPi3 bluetoothd[530]: Endpoint registered: sender=:1.10 path=/MediaEndpoint/A2DPSource\nOct 13 21:48:58 RPi3 bluetoothd[530]: Endpoint registered: sender=:1.10 path=/MediaEndpoint/A2DPSink\n'
def get_bluetooth_service(self, cmd, shell):
if TestDbusModuleCalls.experimental:
return TestDbusModuleCalls.bluetooth_service_experimental
else:
return TestDbusModuleCalls.bluetooth_service_normal
def setUp(self):
"""
Patch the DBus module
:return:
"""
self.dbus_mock = MagicMock()
self.mainloop_mock = MagicMock()
self.gobject_mock = MagicMock()
self.process_mock = MagicMock()
modules = {
'dbus': self.dbus_mock,
'dbus.mainloop.glib': self.mainloop_mock,
'gi.repository': self.gobject_mock,
'subprocess': self.process_mock
}
self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits
self.process_mock.check_output = self.get_bluetooth_service
self.process_mock.run.return_value = subprocess.CompletedProcess(
args=['bluetoothctl', '-v'], returncode=0, stdout=b'bluetoothctl: 5.53\n', stderr=b'')
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
from bluezero import dbus_tools
self.module_under_test = dbus_tools
def tearDown(self):
self.module_patcher.stop()
def test_uuid_path_gatt(self):
dbus_full_path = self.module_under_test.get_dbus_path(adapter='00:00:00:00:5A:AD',
device='F7:17:E4:09:C0:C6',
service='e95df2d8-251d-470a-a062-fa1922dfa9a8',
characteristic='e95d9715-251d-470a-a062-fa1922dfa9a8',
descriptor='00002902-0000-1000-8000-00805f9b34fb')
expected_result = '/org/bluez/hci0/dev_F7_17_E4_09_C0_C6/service0031/char0035/desc0037'
self.assertEqual(dbus_full_path, expected_result)
def test_bad_path(self):
path_found = self.module_under_test.get_dbus_path(adapter='00:00:00:00:5A:C6',
device='F7:17:E4:09:C0:XX',
service='e95df2d8-251d-470a-a062-fa1922dfa9a8')
expected_path = None
self.assertEqual(path_found, expected_path)
def test_get_iface_from_path(self):
my_iface = self.module_under_test.get_iface(adapter='00:00:00:00:5A:AD',
device='F7:17:E4:09:C0:C6',
service='e95df2d8-251d-470a-a062-fa1922dfa9a8',
characteristic='e95d9715-251d-470a-a062-fa1922dfa9a8',
descriptor='00002902-0000-1000-8000-00805f9b34fb')
self.assertEqual(constants.GATT_DESC_IFACE, my_iface)
def test_profile_path(self):
my_iface = self.module_under_test.get_profile_path(adapter='00:00:00:00:5A:AD',
device='F7:17:E4:09:C0:C6',
profile='e95df2d8-251d-470a-a062-fa1922dfa9a8')
self.assertEqual(None, my_iface)
def test_bluez_version(self):
bluez_ver = self.module_under_test.bluez_version()
self.assertEqual('5.53', bluez_ver)
def test_bluez_service_experimental(self):
TestDbusModuleCalls.experimental = True
bluez_exper = self.module_under_test.bluez_experimental_mode()
self.assertTrue(bluez_exper)
def test_bluez_service_normal(self):
TestDbusModuleCalls.experimental = False
bluez_exper = self.module_under_test.bluez_experimental_mode()
self.assertFalse(bluez_exper)
def test_get_device_address_from_dbus_path(self):
"""
        Get mac address from any given dbus_path that includes
"dev_xx_xx_xx_xx"
"""
test_data = [
['/org/bluez/hci0/dev_EB_F6_95_27_84_A0', 'EB:F6:95:27:84:A0'],
['/org/bluez/hci0', ''],
['/org/bluez/hci0/dev_EB_F6_95_27_84_A0/player0',
'EB:F6:95:27:84:A0']
]
for i in range(0, len(test_data)):
with self.subTest(i=i):
self.assertEqual(
test_data[i][1],
self.module_under_test.get_device_address_from_dbus_path(
test_data[i][0]
))
def test_mac_addr_deprecated(self):
with patch('logging.Logger.warning') as logger:
self.module_under_test.get_mac_addr_from_dbus_path(
'/org/bluez/hci0/dev_EB_F6_95_27_84_A0')
logger.assert_called_once_with('get_mac_addr_from_dbus_path has '
'been deprecated and has been '
'replaced with '
'get_device_address_from_dbus_path')
def test_get_device_address(self):
expected = [{'E4:43:33:7E:54:1C': 'BBC micro:bit [pugit]'}]
result = self.module_under_test.get_device_addresses('pugit')
self.assertTrue(isinstance(result, list))
self.assertDictEqual(expected[0], result[0])
if __name__ == '__main__':
unittest.main()
| mit |
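The setUp above works because mock.patch.dict('sys.modules', ...) makes "import dbus" (and the other patched names) resolve to MagicMock objects for the duration of the test, so bluezero.dbus_tools can be imported on a machine without D-Bus installed. The technique on its own, with placeholder module names:

import sys
from unittest import mock

fake_dbus = mock.MagicMock()
with mock.patch.dict(sys.modules, {'dbus': fake_dbus,
                                   'dbus.mainloop.glib': mock.MagicMock()}):
    import dbus                      # resolves to the MagicMock, not a real binding
    dbus.Interface('some_object', 'some.interface.Name')
    print(dbus.Interface.call_args)  # call('some_object', 'some.interface.Name')

# When the with-block exits, sys.modules is restored and later imports are unaffected.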
sysadmind/ansible-modules-extras | system/capabilities.py | 79 | 6980 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Nate Coraor <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates files privileges using the Linux capabilities(7) system.
version_added: "1.6"
options:
path:
description:
- Specifies the path to the file to be managed.
required: true
default: null
capability:
description:
- Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
required: true
default: null
aliases: [ 'cap' ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
choices: [ "present", "absent" ]
default: present
notes:
- The capabilities system will automatically transform operators and flags
into the effective set, so (for example, cap_foo=ep will probably become
cap_foo+ep). This module does not attempt to determine the final operator
and flags to compare, so you will want to ensure that your capabilities
argument matches the final capabilities.
requirements: []
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Set cap_sys_chroot+ep on /foo
- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
# Remove cap_net_bind_service from /bar
- capabilities: path=/bar capability=cap_net_bind_service state=absent
'''
OPS = ( '=', '-', '+' )
# ==============================================================
import os
import tempfile
import re
class CapabilitiesModule(object):
platform = 'Linux'
distribution = None
def __init__(self, module):
self.module = module
self.path = module.params['path'].strip()
self.capability = module.params['capability'].strip().lower()
self.state = module.params['state']
self.getcap_cmd = module.get_bin_path('getcap', required=True)
self.setcap_cmd = module.get_bin_path('setcap', required=True)
self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
self.run()
def run(self):
current = self.getcap(self.path)
caps = [ cap[0] for cap in current ]
if self.state == 'present' and self.capability_tup not in current:
# need to add capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list if it's already set (but op/flags differ)
current = filter(lambda x: x[0] != self.capability_tup[0], current)
# add new cap with correct op/flags
current.append( self.capability_tup )
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
elif self.state == 'absent' and self.capability_tup[0] in caps:
# need to remove capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list and then set current list
current = filter(lambda x: x[0] != self.capability_tup[0], current)
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
self.module.exit_json(changed=False, state=self.state)
def getcap(self, path):
rval = []
cmd = "%s -v %s" % (self.getcap_cmd, path)
rc, stdout, stderr = self.module.run_command(cmd)
# If file xattrs are set but no caps are set the output will be:
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
        # If the file does not exist the output will be (with rc == 0...):
# '/foo (No such file or directory)'
if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
caps = stdout.split(' =')[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
# comma-separated list, so we have to parse that
if ',' in cap:
cap_group = cap.split(',')
cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
for subcap in cap_group:
rval.append( ( subcap, op, flags ) )
else:
rval.append(self._parse_cap(cap))
return rval
def setcap(self, path, caps):
caps = ' '.join([ ''.join(cap) for cap in caps ])
cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
else:
return stdout
def _parse_cap(self, cap, op_required=True):
opind = -1
try:
i = 0
while opind == -1:
opind = cap.find(OPS[i])
i += 1
except:
if op_required:
self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
else:
return (cap, None, None)
op = cap[opind]
cap, flags = cap.split(op)
return (cap, op, flags)
# ==============================================================
# main
def main():
# defining module
module = AnsibleModule(
argument_spec = dict(
path = dict(aliases=['key'], required=True),
capability = dict(aliases=['cap'], required=True),
state = dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True
)
CapabilitiesModule(module)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
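The _parse_cap method above splits a capability clause into its name, operator and flags, where the operator is one of '=', '-', '+' as described in capabilities(7). A stand-alone sketch of that split (the function name is illustrative):

OPS = ('=', '-', '+')

def parse_cap(clause):
    for op in OPS:
        if op in clause:
            name, flags = clause.split(op, 1)
            return name, op, flags
    return clause, None, None    # bare capability name, no operator/flags

print(parse_cap('cap_net_bind_service+ep'))   # ('cap_net_bind_service', '+', 'ep')
print(parse_cap('cap_sys_chroot=ep'))         # ('cap_sys_chroot', '=', 'ep')
print(parse_cap('cap_chown'))                 # ('cap_chown', None, None)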
ds-hwang/chromium-crosswalk | third_party/WebKit/Source/build/scripts/make_style_shorthands.py | 65 | 3163 | #!/usr/bin/env python
# Copyright (C) 2013 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import defaultdict
import sys
import css_properties
import in_generator
from name_utilities import lower_first
import template_expander
class StylePropertyShorthandWriter(css_properties.CSSProperties):
class_name = 'StylePropertyShorthand'
def __init__(self, in_file_path):
super(StylePropertyShorthandWriter, self).__init__(in_file_path)
self._outputs = {
('StylePropertyShorthand.cpp'): self.generate_style_property_shorthand_cpp,
('StylePropertyShorthand.h'): self.generate_style_property_shorthand_h}
self._longhand_dictionary = defaultdict(list)
self._properties = {property_id: property for property_id, property in self._properties.items() if property['longhands']}
for property in self._properties.values():
property['longhand_property_ids'] = map(css_properties.css_name_to_enum, property['longhands'].split(';'))
for longhand in property['longhand_property_ids']:
self._longhand_dictionary[longhand].append(property)
@template_expander.use_jinja('StylePropertyShorthand.cpp.tmpl')
def generate_style_property_shorthand_cpp(self):
return {
'properties': self._properties,
'longhands_dictionary': self._longhand_dictionary,
}
@template_expander.use_jinja('StylePropertyShorthand.h.tmpl')
def generate_style_property_shorthand_h(self):
return {
'properties': self._properties,
}
if __name__ == '__main__':
in_generator.Maker(StylePropertyShorthandWriter).main(sys.argv)
| bsd-3-clause |
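StylePropertyShorthandWriter's constructor above inverts the shorthand -> longhands relation into a longhand -> [shorthands] dictionary using a defaultdict. That grouping step in isolation, with a made-up pair of CSS shorthands as input:

from collections import defaultdict

shorthands = {
    'border-width': ['border-top-width', 'border-right-width',
                     'border-bottom-width', 'border-left-width'],
    'border-top': ['border-top-width', 'border-top-style', 'border-top-color'],
}

longhand_to_shorthands = defaultdict(list)
for shorthand, longhands in shorthands.items():
    for longhand in longhands:
        longhand_to_shorthands[longhand].append(shorthand)

print(sorted(longhand_to_shorthands['border-top-width']))
# ['border-top', 'border-width']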
stevenle/googletv-anymote | googletv/proto/keycodes_pb2.py | 1 | 27774 | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='keycodes.proto',
package='',
serialized_pb='\n\x0ekeycodes.proto*\xe0\x16\n\x04\x43ode\x12\x13\n\x0fKEYCODE_UNKNOWN\x10\x00\x12\x15\n\x11KEYCODE_SOFT_LEFT\x10\x01\x12\x16\n\x12KEYCODE_SOFT_RIGHT\x10\x02\x12\x10\n\x0cKEYCODE_HOME\x10\x03\x12\x10\n\x0cKEYCODE_BACK\x10\x04\x12\x10\n\x0cKEYCODE_CALL\x10\x05\x12\r\n\tKEYCODE_0\x10\x07\x12\r\n\tKEYCODE_1\x10\x08\x12\r\n\tKEYCODE_2\x10\t\x12\r\n\tKEYCODE_3\x10\n\x12\r\n\tKEYCODE_4\x10\x0b\x12\r\n\tKEYCODE_5\x10\x0c\x12\r\n\tKEYCODE_6\x10\r\x12\r\n\tKEYCODE_7\x10\x0e\x12\r\n\tKEYCODE_8\x10\x0f\x12\r\n\tKEYCODE_9\x10\x10\x12\x10\n\x0cKEYCODE_STAR\x10\x11\x12\x11\n\rKEYCODE_POUND\x10\x12\x12\x13\n\x0fKEYCODE_DPAD_UP\x10\x13\x12\x15\n\x11KEYCODE_DPAD_DOWN\x10\x14\x12\x15\n\x11KEYCODE_DPAD_LEFT\x10\x15\x12\x16\n\x12KEYCODE_DPAD_RIGHT\x10\x16\x12\x17\n\x13KEYCODE_DPAD_CENTER\x10\x17\x12\x15\n\x11KEYCODE_VOLUME_UP\x10\x18\x12\x17\n\x13KEYCODE_VOLUME_DOWN\x10\x19\x12\x11\n\rKEYCODE_POWER\x10\x1a\x12\x12\n\x0eKEYCODE_CAMERA\x10\x1b\x12\r\n\tKEYCODE_A\x10\x1d\x12\r\n\tKEYCODE_B\x10\x1e\x12\r\n\tKEYCODE_C\x10\x1f\x12\r\n\tKEYCODE_D\x10 \x12\r\n\tKEYCODE_E\x10!\x12\r\n\tKEYCODE_F\x10\"\x12\r\n\tKEYCODE_G\x10#\x12\r\n\tKEYCODE_H\x10$\x12\r\n\tKEYCODE_I\x10%\x12\r\n\tKEYCODE_J\x10&\x12\r\n\tKEYCODE_K\x10\'\x12\r\n\tKEYCODE_L\x10(\x12\r\n\tKEYCODE_M\x10)\x12\r\n\tKEYCODE_N\x10*\x12\r\n\tKEYCODE_O\x10+\x12\r\n\tKEYCODE_P\x10,\x12\r\n\tKEYCODE_Q\x10-\x12\r\n\tKEYCODE_R\x10.\x12\r\n\tKEYCODE_S\x10/\x12\r\n\tKEYCODE_T\x10\x30\x12\r\n\tKEYCODE_U\x10\x31\x12\r\n\tKEYCODE_V\x10\x32\x12\r\n\tKEYCODE_W\x10\x33\x12\r\n\tKEYCODE_X\x10\x34\x12\r\n\tKEYCODE_Y\x10\x35\x12\r\n\tKEYCODE_Z\x10\x36\x12\x11\n\rKEYCODE_COMMA\x10\x37\x12\x12\n\x0eKEYCODE_PERIOD\x10\x38\x12\x14\n\x10KEYCODE_ALT_LEFT\x10\x39\x12\x15\n\x11KEYCODE_ALT_RIGHT\x10:\x12\x16\n\x12KEYCODE_SHIFT_LEFT\x10;\x12\x17\n\x13KEYCODE_SHIFT_RIGHT\x10<\x12\x0f\n\x0bKEYCODE_TAB\x10=\x12\x11\n\rKEYCODE_SPACE\x10>\x12\x14\n\x10KEYCODE_EXPLORER\x10@\x12\x11\n\rKEYCODE_ENTER\x10\x42\x12\x0f\n\x0bKEYCODE_DEL\x10\x43\x12\x11\n\rKEYCODE_GRAVE\x10\x44\x12\x11\n\rKEYCODE_MINUS\x10\x45\x12\x12\n\x0eKEYCODE_EQUALS\x10\x46\x12\x18\n\x14KEYCODE_LEFT_BRACKET\x10G\x12\x19\n\x15KEYCODE_RIGHT_BRACKET\x10H\x12\x15\n\x11KEYCODE_BACKSLASH\x10I\x12\x15\n\x11KEYCODE_SEMICOLON\x10J\x12\x16\n\x12KEYCODE_APOSTROPHE\x10K\x12\x11\n\rKEYCODE_SLASH\x10L\x12\x0e\n\nKEYCODE_AT\x10M\x12\x11\n\rKEYCODE_FOCUS\x10P\x12\x10\n\x0cKEYCODE_PLUS\x10Q\x12\x10\n\x0cKEYCODE_MENU\x10R\x12\x12\n\x0eKEYCODE_SEARCH\x10T\x12\x1c\n\x18KEYCODE_MEDIA_PLAY_PAUSE\x10U\x12\x16\n\x12KEYCODE_MEDIA_STOP\x10V\x12\x16\n\x12KEYCODE_MEDIA_NEXT\x10W\x12\x1a\n\x16KEYCODE_MEDIA_PREVIOUS\x10X\x12\x18\n\x14KEYCODE_MEDIA_REWIND\x10Y\x12\x1e\n\x1aKEYCODE_MEDIA_FAST_FORWARD\x10Z\x12\x10\n\x0cKEYCODE_MUTE\x10[\x12\x15\n\x11KEYCODE_CTRL_LEFT\x10\\\x12\x16\n\x12KEYCODE_CTRL_RIGHT\x10]\x12\x12\n\x0eKEYCODE_INSERT\x10^\x12\x11\n\rKEYCODE_PAUSE\x10_\x12\x13\n\x0fKEYCODE_PAGE_UP\x10`\x12\x15\n\x11KEYCODE_PAGE_DOWN\x10\x61\x12\x18\n\x14KEYCODE_PRINT_SCREEN\x10\x62\x12\x10\n\x0cKEYCODE_INFO\x10g\x12\x12\n\x0eKEYCODE_WINDOW\x10h\x12\x14\n\x10KEYCODE_BOOKMARK\x10n\x12\x15\n\x11KEYCODE_CAPS_LOCK\x10o\x12\x12\n\x0eKEYCODE_ESCAPE\x10p\x12\x15\n\x11KEYCODE_META_LEFT\x10q\x12\x16\n\x12KEYCODE_META_RIGHT\x10r\x12\x13\n\x0fKEYCODE_ZOOM_IN\x10s\x12\x14\n\x10KEYCODE_ZOOM_OUT\x10t\x12\x16\n\x12KEYCODE_CHANNEL_UP\x10u\x12\x18\n\x14KEYCODE_CHANNEL_DOWN\x10v\x12\x10\n\x0cKEYCODE_LIVE\x10x\x12\x0f\n\x0bKEYCODE_DVR\x10y\x12\x11\n\rKEYCODE_GUIDE\x10z\x12\x1b\n\x17KEYCODE_MEDIA_SKIP_BACK\x10{\x12\x1e\n\x1aKEYCODE_MEDIA_SKIP_FORWARD\x10|\x12\x18
\n\x14KEYCODE_MEDIA_RECORD\x10}\x12\x16\n\x12KEYCODE_MEDIA_PLAY\x10~\x12\x15\n\x10KEYCODE_PROG_RED\x10\x80\x01\x12\x17\n\x12KEYCODE_PROG_GREEN\x10\x81\x01\x12\x18\n\x13KEYCODE_PROG_YELLOW\x10\x82\x01\x12\x16\n\x11KEYCODE_PROG_BLUE\x10\x83\x01\x12\x15\n\x10KEYCODE_BD_POWER\x10\x84\x01\x12\x15\n\x10KEYCODE_BD_INPUT\x10\x85\x01\x12\x16\n\x11KEYCODE_STB_POWER\x10\x86\x01\x12\x16\n\x11KEYCODE_STB_INPUT\x10\x87\x01\x12\x15\n\x10KEYCODE_STB_MENU\x10\x88\x01\x12\x15\n\x10KEYCODE_TV_POWER\x10\x89\x01\x12\x15\n\x10KEYCODE_TV_INPUT\x10\x8a\x01\x12\x16\n\x11KEYCODE_AVR_POWER\x10\x8b\x01\x12\x16\n\x11KEYCODE_AVR_INPUT\x10\x8c\x01\x12\x12\n\rKEYCODE_AUDIO\x10\x8d\x01\x12\x12\n\rKEYCODE_EJECT\x10\x8e\x01\x12\x1a\n\x15KEYCODE_BD_POPUP_MENU\x10\x8f\x01\x12\x18\n\x13KEYCODE_BD_TOP_MENU\x10\x90\x01\x12\x15\n\x10KEYCODE_SETTINGS\x10\x91\x01\x12\x12\n\rKEYCODE_SETUP\x10\x92\x01\x12\x0e\n\tBTN_FIRST\x10\x80\x02\x12\r\n\x08\x42TN_MISC\x10\x80\x02\x12\n\n\x05\x42TN_0\x10\x80\x02\x12\n\n\x05\x42TN_1\x10\x81\x02\x12\n\n\x05\x42TN_2\x10\x82\x02\x12\n\n\x05\x42TN_3\x10\x83\x02\x12\n\n\x05\x42TN_4\x10\x84\x02\x12\n\n\x05\x42TN_5\x10\x85\x02\x12\n\n\x05\x42TN_6\x10\x86\x02\x12\n\n\x05\x42TN_7\x10\x87\x02\x12\n\n\x05\x42TN_8\x10\x88\x02\x12\n\n\x05\x42TN_9\x10\x89\x02\x12\x0e\n\tBTN_MOUSE\x10\x90\x02\x12\r\n\x08\x42TN_LEFT\x10\x90\x02\x12\x0e\n\tBTN_RIGHT\x10\x91\x02\x12\x0f\n\nBTN_MIDDLE\x10\x92\x02\x12\r\n\x08\x42TN_SIDE\x10\x93\x02\x12\x0e\n\tBTN_EXTRA\x10\x94\x02\x12\x10\n\x0b\x42TN_FORWARD\x10\x95\x02\x12\r\n\x08\x42TN_BACK\x10\x96\x02\x12\r\n\x08\x42TN_TASK\x10\x97\x02*\x1a\n\x06\x41\x63tion\x12\x06\n\x02UP\x10\x00\x12\x08\n\x04\x44OWN\x10\x01\x42\x1b\n\x12\x63om.google.anymoteB\x03KeyH\x03')
_CODE = descriptor.EnumDescriptor(
name='Code',
full_name='Code',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='KEYCODE_UNKNOWN', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SOFT_LEFT', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SOFT_RIGHT', index=2, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_HOME', index=3, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BACK', index=4, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CALL', index=5, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_0', index=6, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_1', index=7, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_2', index=8, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_3', index=9, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_4', index=10, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_5', index=11, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_6', index=12, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_7', index=13, number=14,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_8', index=14, number=15,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_9', index=15, number=16,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_STAR', index=16, number=17,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_POUND', index=17, number=18,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DPAD_UP', index=18, number=19,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DPAD_DOWN', index=19, number=20,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DPAD_LEFT', index=20, number=21,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DPAD_RIGHT', index=21, number=22,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DPAD_CENTER', index=22, number=23,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_VOLUME_UP', index=23, number=24,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_VOLUME_DOWN', index=24, number=25,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_POWER', index=25, number=26,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CAMERA', index=26, number=27,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_A', index=27, number=29,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_B', index=28, number=30,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_C', index=29, number=31,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_D', index=30, number=32,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_E', index=31, number=33,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_F', index=32, number=34,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_G', index=33, number=35,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_H', index=34, number=36,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_I', index=35, number=37,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_J', index=36, number=38,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_K', index=37, number=39,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_L', index=38, number=40,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_M', index=39, number=41,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_N', index=40, number=42,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_O', index=41, number=43,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_P', index=42, number=44,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_Q', index=43, number=45,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_R', index=44, number=46,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_S', index=45, number=47,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_T', index=46, number=48,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_U', index=47, number=49,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_V', index=48, number=50,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_W', index=49, number=51,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_X', index=50, number=52,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_Y', index=51, number=53,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_Z', index=52, number=54,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_COMMA', index=53, number=55,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PERIOD', index=54, number=56,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ALT_LEFT', index=55, number=57,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ALT_RIGHT', index=56, number=58,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SHIFT_LEFT', index=57, number=59,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SHIFT_RIGHT', index=58, number=60,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_TAB', index=59, number=61,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SPACE', index=60, number=62,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_EXPLORER', index=61, number=64,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ENTER', index=62, number=66,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DEL', index=63, number=67,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_GRAVE', index=64, number=68,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MINUS', index=65, number=69,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_EQUALS', index=66, number=70,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_LEFT_BRACKET', index=67, number=71,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_RIGHT_BRACKET', index=68, number=72,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BACKSLASH', index=69, number=73,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SEMICOLON', index=70, number=74,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_APOSTROPHE', index=71, number=75,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SLASH', index=72, number=76,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_AT', index=73, number=77,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_FOCUS', index=74, number=80,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PLUS', index=75, number=81,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MENU', index=76, number=82,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SEARCH', index=77, number=84,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_PLAY_PAUSE', index=78, number=85,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_STOP', index=79, number=86,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_NEXT', index=80, number=87,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_PREVIOUS', index=81, number=88,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_REWIND', index=82, number=89,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_FAST_FORWARD', index=83, number=90,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MUTE', index=84, number=91,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CTRL_LEFT', index=85, number=92,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CTRL_RIGHT', index=86, number=93,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_INSERT', index=87, number=94,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PAUSE', index=88, number=95,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PAGE_UP', index=89, number=96,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PAGE_DOWN', index=90, number=97,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PRINT_SCREEN', index=91, number=98,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_INFO', index=92, number=103,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_WINDOW', index=93, number=104,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BOOKMARK', index=94, number=110,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CAPS_LOCK', index=95, number=111,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ESCAPE', index=96, number=112,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_META_LEFT', index=97, number=113,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_META_RIGHT', index=98, number=114,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ZOOM_IN', index=99, number=115,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_ZOOM_OUT', index=100, number=116,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CHANNEL_UP', index=101, number=117,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_CHANNEL_DOWN', index=102, number=118,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_LIVE', index=103, number=120,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_DVR', index=104, number=121,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_GUIDE', index=105, number=122,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_SKIP_BACK', index=106, number=123,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_SKIP_FORWARD', index=107, number=124,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_RECORD', index=108, number=125,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_MEDIA_PLAY', index=109, number=126,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PROG_RED', index=110, number=128,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PROG_GREEN', index=111, number=129,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PROG_YELLOW', index=112, number=130,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_PROG_BLUE', index=113, number=131,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BD_POWER', index=114, number=132,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BD_INPUT', index=115, number=133,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_STB_POWER', index=116, number=134,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_STB_INPUT', index=117, number=135,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_STB_MENU', index=118, number=136,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_TV_POWER', index=119, number=137,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_TV_INPUT', index=120, number=138,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_AVR_POWER', index=121, number=139,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_AVR_INPUT', index=122, number=140,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_AUDIO', index=123, number=141,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_EJECT', index=124, number=142,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BD_POPUP_MENU', index=125, number=143,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_BD_TOP_MENU', index=126, number=144,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SETTINGS', index=127, number=145,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='KEYCODE_SETUP', index=128, number=146,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_FIRST', index=129, number=256,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_MISC', index=130, number=256,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_0', index=131, number=256,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_1', index=132, number=257,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_2', index=133, number=258,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_3', index=134, number=259,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_4', index=135, number=260,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_5', index=136, number=261,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_6', index=137, number=262,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_7', index=138, number=263,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_8', index=139, number=264,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_9', index=140, number=265,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_MOUSE', index=141, number=272,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_LEFT', index=142, number=272,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_RIGHT', index=143, number=273,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_MIDDLE', index=144, number=274,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_SIDE', index=145, number=275,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_EXTRA', index=146, number=276,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_FORWARD', index=147, number=277,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_BACK', index=148, number=278,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='BTN_TASK', index=149, number=279,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=19,
serialized_end=2931,
)
_ACTION = descriptor.EnumDescriptor(
name='Action',
full_name='Action',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='UP', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='DOWN', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2933,
serialized_end=2959,
)
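# Module-level constants below mirror the KeyCode and Action enum values defined above.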
KEYCODE_UNKNOWN = 0
KEYCODE_SOFT_LEFT = 1
KEYCODE_SOFT_RIGHT = 2
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_CALL = 5
KEYCODE_0 = 7
KEYCODE_1 = 8
KEYCODE_2 = 9
KEYCODE_3 = 10
KEYCODE_4 = 11
KEYCODE_5 = 12
KEYCODE_6 = 13
KEYCODE_7 = 14
KEYCODE_8 = 15
KEYCODE_9 = 16
KEYCODE_STAR = 17
KEYCODE_POUND = 18
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_LEFT = 21
KEYCODE_DPAD_RIGHT = 22
KEYCODE_DPAD_CENTER = 23
KEYCODE_VOLUME_UP = 24
KEYCODE_VOLUME_DOWN = 25
KEYCODE_POWER = 26
KEYCODE_CAMERA = 27
KEYCODE_A = 29
KEYCODE_B = 30
KEYCODE_C = 31
KEYCODE_D = 32
KEYCODE_E = 33
KEYCODE_F = 34
KEYCODE_G = 35
KEYCODE_H = 36
KEYCODE_I = 37
KEYCODE_J = 38
KEYCODE_K = 39
KEYCODE_L = 40
KEYCODE_M = 41
KEYCODE_N = 42
KEYCODE_O = 43
KEYCODE_P = 44
KEYCODE_Q = 45
KEYCODE_R = 46
KEYCODE_S = 47
KEYCODE_T = 48
KEYCODE_U = 49
KEYCODE_V = 50
KEYCODE_W = 51
KEYCODE_X = 52
KEYCODE_Y = 53
KEYCODE_Z = 54
KEYCODE_COMMA = 55
KEYCODE_PERIOD = 56
KEYCODE_ALT_LEFT = 57
KEYCODE_ALT_RIGHT = 58
KEYCODE_SHIFT_LEFT = 59
KEYCODE_SHIFT_RIGHT = 60
KEYCODE_TAB = 61
KEYCODE_SPACE = 62
KEYCODE_EXPLORER = 64
KEYCODE_ENTER = 66
KEYCODE_DEL = 67
KEYCODE_GRAVE = 68
KEYCODE_MINUS = 69
KEYCODE_EQUALS = 70
KEYCODE_LEFT_BRACKET = 71
KEYCODE_RIGHT_BRACKET = 72
KEYCODE_BACKSLASH = 73
KEYCODE_SEMICOLON = 74
KEYCODE_APOSTROPHE = 75
KEYCODE_SLASH = 76
KEYCODE_AT = 77
KEYCODE_FOCUS = 80
KEYCODE_PLUS = 81
KEYCODE_MENU = 82
KEYCODE_SEARCH = 84
KEYCODE_MEDIA_PLAY_PAUSE = 85
KEYCODE_MEDIA_STOP = 86
KEYCODE_MEDIA_NEXT = 87
KEYCODE_MEDIA_PREVIOUS = 88
KEYCODE_MEDIA_REWIND = 89
KEYCODE_MEDIA_FAST_FORWARD = 90
KEYCODE_MUTE = 91
KEYCODE_CTRL_LEFT = 92
KEYCODE_CTRL_RIGHT = 93
KEYCODE_INSERT = 94
KEYCODE_PAUSE = 95
KEYCODE_PAGE_UP = 96
KEYCODE_PAGE_DOWN = 97
KEYCODE_PRINT_SCREEN = 98
KEYCODE_INFO = 103
KEYCODE_WINDOW = 104
KEYCODE_BOOKMARK = 110
KEYCODE_CAPS_LOCK = 111
KEYCODE_ESCAPE = 112
KEYCODE_META_LEFT = 113
KEYCODE_META_RIGHT = 114
KEYCODE_ZOOM_IN = 115
KEYCODE_ZOOM_OUT = 116
KEYCODE_CHANNEL_UP = 117
KEYCODE_CHANNEL_DOWN = 118
KEYCODE_LIVE = 120
KEYCODE_DVR = 121
KEYCODE_GUIDE = 122
KEYCODE_MEDIA_SKIP_BACK = 123
KEYCODE_MEDIA_SKIP_FORWARD = 124
KEYCODE_MEDIA_RECORD = 125
KEYCODE_MEDIA_PLAY = 126
KEYCODE_PROG_RED = 128
KEYCODE_PROG_GREEN = 129
KEYCODE_PROG_YELLOW = 130
KEYCODE_PROG_BLUE = 131
KEYCODE_BD_POWER = 132
KEYCODE_BD_INPUT = 133
KEYCODE_STB_POWER = 134
KEYCODE_STB_INPUT = 135
KEYCODE_STB_MENU = 136
KEYCODE_TV_POWER = 137
KEYCODE_TV_INPUT = 138
KEYCODE_AVR_POWER = 139
KEYCODE_AVR_INPUT = 140
KEYCODE_AUDIO = 141
KEYCODE_EJECT = 142
KEYCODE_BD_POPUP_MENU = 143
KEYCODE_BD_TOP_MENU = 144
KEYCODE_SETTINGS = 145
KEYCODE_SETUP = 146
BTN_FIRST = 256
BTN_MISC = 256
BTN_0 = 256
BTN_1 = 257
BTN_2 = 258
BTN_3 = 259
BTN_4 = 260
BTN_5 = 261
BTN_6 = 262
BTN_7 = 263
BTN_8 = 264
BTN_9 = 265
BTN_MOUSE = 272
BTN_LEFT = 272
BTN_RIGHT = 273
BTN_MIDDLE = 274
BTN_SIDE = 275
BTN_EXTRA = 276
BTN_FORWARD = 277
BTN_BACK = 278
BTN_TASK = 279
UP = 0
DOWN = 1
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
jonzobrist/Percona-Server-5.1 | python-for-subunit2junitxml/testtools/tests/test_compat.py | 62 | 9528 | # Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for miscellaneous compatibility functions"""
import linecache
import os
import sys
import tempfile
import traceback
import testtools
from testtools.compat import (
_b,
_detect_encoding,
_get_source_encoding,
_u,
unicode_output_stream,
)
from testtools.matchers import (
MatchesException,
Not,
Raises,
)
class TestDetectEncoding(testtools.TestCase):
"""Test detection of Python source encodings"""
def _check_encoding(self, expected, lines, possibly_invalid=False):
"""Check lines are valid Python and encoding is as expected"""
if not possibly_invalid:
compile(_b("".join(lines)), "<str>", "exec")
encoding = _detect_encoding(lines)
self.assertEqual(expected, encoding,
"Encoding %r expected but got %r from lines %r" %
(expected, encoding, lines))
def test_examples_from_pep(self):
"""Check the examples given in PEP 263 all work as specified
See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
"""
# With interpreter binary and using Emacs style file encoding comment:
self._check_encoding("latin-1", (
"#!/usr/bin/python\n",
"# -*- coding: latin-1 -*-\n",
"import os, sys\n"))
self._check_encoding("iso-8859-15", (
"#!/usr/bin/python\n",
"# -*- coding: iso-8859-15 -*-\n",
"import os, sys\n"))
self._check_encoding("ascii", (
"#!/usr/bin/python\n",
"# -*- coding: ascii -*-\n",
"import os, sys\n"))
# Without interpreter line, using plain text:
self._check_encoding("utf-8", (
"# This Python file uses the following encoding: utf-8\n",
"import os, sys\n"))
# Text editors might have different ways of defining the file's
# encoding, e.g.
self._check_encoding("latin-1", (
"#!/usr/local/bin/python\n",
"# coding: latin-1\n",
"import os, sys\n"))
# Without encoding comment, Python's parser will assume ASCII text:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"import os, sys\n"))
# Encoding comments which don't work:
# Missing "coding:" prefix:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"# latin-1\n",
"import os, sys\n"))
# Encoding comment not on line 1 or 2:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"#\n",
"# -*- coding: latin-1 -*-\n",
"import os, sys\n"))
# Unsupported encoding:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"# -*- coding: utf-42 -*-\n",
"import os, sys\n"),
possibly_invalid=True)
def test_bom(self):
"""Test the UTF-8 BOM counts as an encoding declaration"""
self._check_encoding("utf-8", (
"\xef\xbb\xbfimport sys\n",
))
self._check_encoding("utf-8", (
"\xef\xbb\xbf# File encoding: UTF-8\n",
))
self._check_encoding("utf-8", (
'\xef\xbb\xbf"""Module docstring\n',
'\xef\xbb\xbfThat should just be a ZWNB"""\n'))
self._check_encoding("latin-1", (
'"""Is this coding: latin-1 or coding: utf-8 instead?\n',
'\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
self._check_encoding("utf-8", (
"\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
'"""Module docstring say \xe2\x98\x86"""\n'))
def test_multiple_coding_comments(self):
"""Test only the first of multiple coding declarations counts"""
self._check_encoding("iso-8859-1", (
"# Is the coding: iso-8859-1\n",
"# Or is it coding: iso-8859-2\n"),
possibly_invalid=True)
self._check_encoding("iso-8859-1", (
"#!/usr/bin/python\n",
"# Is the coding: iso-8859-1\n",
"# Or is it coding: iso-8859-2\n"))
self._check_encoding("iso-8859-1", (
"# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
"# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
possibly_invalid=True)
self._check_encoding("iso-8859-2", (
"# Is the coding iso-8859-1 or coding: iso-8859-2\n",
"# Spot the missing colon above\n"))
class TestGetSourceEncoding(testtools.TestCase):
"""Test reading and caching the encodings of source files"""
def setUp(self):
testtools.TestCase.setUp(self)
dir = tempfile.mkdtemp()
self.addCleanup(os.rmdir, dir)
self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
self._written = False
def put_source(self, text):
f = open(self.filename, "w")
try:
f.write(text)
finally:
f.close()
if not self._written:
self._written = True
self.addCleanup(os.remove, self.filename)
self.addCleanup(linecache.cache.pop, self.filename, None)
def test_nonexistant_file_as_ascii(self):
"""When file can't be found, the encoding should default to ascii"""
self.assertEquals("ascii", _get_source_encoding(self.filename))
def test_encoding_is_cached(self):
"""The encoding should stay the same if the cache isn't invalidated"""
self.put_source(
"# coding: iso-8859-13\n"
"import os\n")
self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
self.put_source(
"# coding: rot-13\n"
"vzcbeg bf\n")
self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
def test_traceback_rechecks_encoding(self):
"""A traceback function checks the cache and resets the encoding"""
self.put_source(
"# coding: iso-8859-8\n"
"import os\n")
self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
self.put_source(
"# coding: utf-8\n"
"import os\n")
try:
exec (compile("raise RuntimeError\n", self.filename, "exec"))
except RuntimeError:
traceback.extract_tb(sys.exc_info()[2])
else:
self.fail("RuntimeError not raised")
self.assertEquals("utf-8", _get_source_encoding(self.filename))
class _FakeOutputStream(object):
"""A simple file-like object for testing"""
def __init__(self):
self.writelog = []
def write(self, obj):
self.writelog.append(obj)
class TestUnicodeOutputStream(testtools.TestCase):
"""Test wrapping output streams so they work with arbitrary unicode"""
uni = _u("pa\u026a\u03b8\u0259n")
def setUp(self):
super(TestUnicodeOutputStream, self).setUp()
if sys.platform == "cli":
self.skip("IronPython shouldn't wrap streams to do encoding")
def test_no_encoding_becomes_ascii(self):
"""A stream with no encoding attribute gets ascii/replace strings"""
sout = _FakeOutputStream()
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_encoding_as_none_becomes_ascii(self):
"""A stream with encoding value of None gets ascii/replace strings"""
sout = _FakeOutputStream()
sout.encoding = None
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_bogus_encoding_becomes_ascii(self):
"""A stream with a bogus encoding gets ascii/replace strings"""
sout = _FakeOutputStream()
sout.encoding = "bogus"
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_partial_encoding_replace(self):
"""A string which can be partly encoded correctly should be"""
sout = _FakeOutputStream()
sout.encoding = "iso-8859-7"
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
def test_unicode_encodings_not_wrapped(self):
"""A unicode encoding is left unwrapped as needs no error handler"""
sout = _FakeOutputStream()
sout.encoding = "utf-8"
self.assertIs(unicode_output_stream(sout), sout)
sout = _FakeOutputStream()
sout.encoding = "utf-16-be"
self.assertIs(unicode_output_stream(sout), sout)
def test_stringio(self):
"""A StringIO object should maybe get an ascii native str type"""
try:
from cStringIO import StringIO
newio = False
except ImportError:
from io import StringIO
newio = True
sout = StringIO()
soutwrapper = unicode_output_stream(sout)
if newio:
self.expectFailure("Python 3 StringIO expects text not bytes",
self.assertThat, lambda: soutwrapper.write(self.uni),
Not(Raises(MatchesException(TypeError))))
soutwrapper.write(self.uni)
self.assertEqual("pa???n", sout.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| bsd-3-clause |
susansls/zulip | zerver/webhooks/gogs/tests.py | 5 | 3082 | # -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.lib.test_classes import WebhookTestCase
class GogsHookTests(WebhookTestCase):
STREAM_NAME = 'commits'
URL_TEMPLATE = "/api/v1/external/gogs?&api_key={api_key}"
FIXTURE_DIR_NAME = 'gogs'
def test_push(self):
# type: () -> None
expected_subject = u"try-git / master"
expected_message = u"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) to branch master
* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
self.send_and_test_stream_message('push', expected_subject, expected_message, HTTP_X_GOGS_EVENT='push')
def test_push_commits_more_than_limits(self):
# type: () -> None
expected_subject = u"try-git / master"
commits_info = "* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n"
expected_message = u"john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) to branch master\n\n{}[and {} more commit(s)]".format(
commits_info * COMMITS_LIMIT,
30 - COMMITS_LIMIT
)
self.send_and_test_stream_message('push_commits_more_than_limits', expected_subject, expected_message, HTTP_X_GOGS_EVENT='push')
def test_new_branch(self):
# type: () -> None
expected_subject = u"try-git / my_feature"
expected_message = u"john created [my_feature](http://localhost:3000/john/try-git/src/my_feature) branch"
self.send_and_test_stream_message('branch', expected_subject, expected_message, HTTP_X_GOGS_EVENT='create')
def test_pull_request_opened(self):
# type: () -> None
expected_subject = u"try-git / PR #1 Title Text for Pull Request"
expected_message = u"""john opened [PR #1](http://localhost:3000/john/try-git/pulls/1)
from `feature` to `master`"""
self.send_and_test_stream_message('pull_request_opened', expected_subject, expected_message, HTTP_X_GOGS_EVENT='pull_request')
def test_pull_request_closed(self):
# type: () -> None
expected_subject = u"try-git / PR #1 Title Text for Pull Request"
expected_message = u"""john closed [PR #1](http://localhost:3000/john/try-git/pulls/1)
from `feature` to `master`"""
self.send_and_test_stream_message('pull_request_closed', expected_subject, expected_message, HTTP_X_GOGS_EVENT='pull_request')
def test_pull_request_merged(self):
# type: () -> None
expected_subject = u"try-git / PR #2 Title Text for Pull Request"
expected_message = u"""john merged [PR #2](http://localhost:3000/john/try-git/pulls/2)
from `feature` to `master`"""
self.send_and_test_stream_message('pull_request_merged', expected_subject, expected_message, HTTP_X_GOGS_EVENT='pull_request')
| apache-2.0 |
DirtyUnicorns/android_external_chromium-org | third_party/closure_linter/closure_linter/checker.py | 135 | 4808 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking JS files for common style guide violations."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import gflags as flags
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import javascriptlintrules
from closure_linter import javascriptstatetracker
from closure_linter.common import lintrunner
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'List of files with relaxed documentation checks. Will not '
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.')
flags.DEFINE_list('closurized_namespaces', '',
                  'Namespace prefixes, used for testing of '
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
                  'Fully qualified namespaces that should not be reported '
'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Checker that applies JavaScriptLintRules."""
def __init__(self, error_handler):
"""Initialize an JavaScriptStyleChecker object.
Args:
error_handler: Error handler to pass all errors to.
"""
self._namespaces_info = None
if flags.FLAGS.closurized_namespaces:
self._namespaces_info = (
closurizednamespacesinfo.ClosurizedNamespacesInfo(
flags.FLAGS.closurized_namespaces,
flags.FLAGS.ignored_extra_namespaces))
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
lint_rules=javascriptlintrules.JavaScriptLintRules(
self._namespaces_info),
state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
limited_doc_files=flags.FLAGS.limited_doc_files)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Adds a separate pass for computing dependency information based on
goog.require and goog.provide statements prior to the main linting pass.
Args:
token: The first token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
# To maximize the amount of errors that get reported before a parse error
# is displayed, don't run the dependency pass if a parse error exists.
if self._namespaces_info and not parse_error:
self._namespaces_info.Reset()
result = (self._ExecutePass(token, self._DependencyPass) and
self._ExecutePass(token, self._LintPass,
debug_tokens=debug_tokens))
else:
result = self._ExecutePass(token, self._LintPass, parse_error,
debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _DependencyPass(self, token):
"""Processes an invidual token for dependency information.
Used to encapsulate the logic needed to process an individual token so that
it can be passed to _ExecutePass.
Args:
token: The token to process.
"""
self._namespaces_info.ProcessToken(token, self._state_tracker)
class GJsLintRunner(lintrunner.LintRunner):
"""Wrapper class to run GJsLint."""
def Run(self, filenames, error_handler):
"""Run GJsLint on the given filenames.
Args:
filenames: The filenames to check
error_handler: An ErrorHandler object.
"""
checker = JavaScriptStyleChecker(error_handler)
# Check the list of files.
for filename in filenames:
checker.Check(filename)
| bsd-3-clause |
JVillella/tensorflow | tensorflow/python/kernel_tests/cholesky_op_test.py | 18 | 13673 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
return gen_linalg_ops.cholesky_grad(l, grad)
def _GradWithInverseL(l, l_inverse, grad):
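  # Shared helper for the composite gradient variants below: applies the Cholesky
  # gradient formula given l and an explicitly formed l^{-1}.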
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(
l, linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = sess.run([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.test_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.test_session(use_gpu=False):
with self.assertRaisesOpError(
"Cholesky decomposition was not successful. The"
" input might not be valid."):
# All rows of the matrix below add to zero
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def testAgainstSpecialized(self):
np.random.seed(0)
data = np.random.randn(33, 33).astype(np.float32)
data = np.matmul(data, data.T)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), self.test_session(use_gpu=False) as s:
x = constant_op.constant(data, dtypes_lib.float32)
chol = linalg_ops.cholesky(x)
composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
specialized_grad = SpecializedGrad(chol, grad_data)
reference, actual = s.run([specialized_grad, composite_grad])
self.assertAllClose(reference, actual)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.test_session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
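  # Benchmark shapes; tuples with more than two entries include leading batch dimensions.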
sizes = [
(4, 4), (16, 16), (256, 256), (1024, 1024), (2048, 2048),
(513, 2, 2), (513, 8, 8), (4, 513, 2, 2)
]
def _GenerateData(self, size):
batch_shape = size[:-2]
size = size[-2:]
assert size[0] == size[1]
n = size[0]
data = np.ones(size).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(data, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for size in self.sizes:
data = self._GenerateData(size)
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
l = linalg_ops.cholesky(data)
self.run_op_benchmark(
sess, control_flow_ops.group(l,),
min_iters=25,
name="cholesky_cpu_{size}".format(size=size))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/gpu:0"):
l = linalg_ops.cholesky(data)
self.run_op_benchmark(
sess, l,
min_iters=25,
name="cholesky_gpu_{size}".format(size=size))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for size in self.sizes:
data = self._GenerateData(size)
l = np.linalg.cholesky(data)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device(device):
grad = grad_fn(l, grad_data)
self.run_op_benchmark(
sess, control_flow_ops.group(grad,),
min_iters=25,
name="{name}_{dev}_{size}".format(
name=name, dev=grad.device, size=size))
if test.is_gpu_available(True):
_BenchmarkGrad(
MatrixInverseCompositeGrad, "composite_matrix_inverse", "/gpu:0")
_BenchmarkGrad(
TriAngInvCompositeGrad, "composite_tri_ang_inverse", "/gpu:0")
_BenchmarkGrad(
TriAngSolveCompositeGrad, "composite_triangular_solve", "/gpu:0")
_BenchmarkGrad(
MatrixInverseCompositeGrad, "composite_matrix_inverse", "/cpu:0")
_BenchmarkGrad(
TriAngInvCompositeGrad, "composite_tri_ang_inverse", "/cpu:0")
_BenchmarkGrad(
TriAngSolveCompositeGrad, "composite_triangular_solve", "/cpu:0")
_BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
test.main()
| apache-2.0 |
carlmjohnson/texttools | texttools/titlecase.py | 2 | 9428 | #!/usr/bin/env python3
"""
titlecase.py v0.1
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import unittest
import sys
import re
def titlecase(text):
"""
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing small words like a/an/the in the input.
The list of "small words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
small = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
punct = "[!\"#$%&'()*+,-./:;?@[\\\\\\]_`{|}~]"
small_words = re.compile(r'^(%s)$' % small, re.I)
inline_period = re.compile(r'[a-zA-Z][.][a-zA-Z]')
uc_elsewhere = re.compile(r'%s*?[a-zA-Z]+[A-Z]+?' % punct)
capfirst = re.compile(r"^%s*?([A-Za-z])" % punct)
small_first = re.compile(r'^(%s*)(%s)\b' % (punct, small), re.I)
small_last = re.compile(r'\b(%s)%s?$' % (small, punct), re.I)
subphrase = re.compile(r'([:.;?!][ ])(%s)' % small)
words = re.split('\s', text)
line = []
for word in words:
if inline_period.search(word) or uc_elsewhere.match(word):
line.append(word)
continue
if small_words.match(word):
line.append(word.lower())
continue
line.append(capfirst.sub(lambda m: m.group(0).upper(), word))
line = " ".join(line)
line = small_first.sub(lambda m: '%s%s' % (m.group(1), m.group(2).capitalize()), line)
line = small_last.sub(lambda m: m.group(0).capitalize(), line)
line = subphrase.sub(lambda m: '%s%s' % (m.group(1), m.group(2).capitalize()), line)
return line
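# Illustrative example (added note, not part of the original module):
#   titlecase("the quick brown fox jumps over the lazy dog")
#   returns 'The Quick Brown Fox Jumps Over the Lazy Dog' -- small words such as
#   'the' stay lowercase unless they begin or end the title.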
class TitlecaseTests(unittest.TestCase):
""" Tests to ensure titlecase follows all of the rules """
def test_q_and_a(self):
""" Testing: Q&A With Steve Jobs: 'That's What Happens In
Technology' """
text = titlecase(
"Q&A With Steve Jobs: 'That's What Happens In Technology'"
)
result = "Q&A With Steve Jobs: 'That's What Happens in Technology'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_at_and_t(self):
""" Testing: What Is AT&T's Problem? """
text = titlecase("What Is AT&T's Problem?")
result = "What Is AT&T's Problem?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_deal(self):
""" Testing: Apple Deal With AT&T Falls Through """
text = titlecase("Apple Deal With AT&T Falls Through")
result = "Apple Deal With AT&T Falls Through"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that(self):
""" Testing: this v that """
text = titlecase("this v that")
result = "This v That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that2(self):
""" Testing: this v. that """
text = titlecase("this v. that")
result = "This v. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that(self):
""" Testing: this vs that """
text = titlecase("this vs that")
result = "This vs That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that2(self):
""" Testing: this vs. that """
text = titlecase("this vs. that")
result = "This vs. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_sec(self):
""" Testing: The SEC's Apple Probe: What You Need to Know """
text = titlecase("The SEC's Apple Probe: What You Need to Know")
result = "The SEC's Apple Probe: What You Need to Know"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quoted(self):
""" Testing: 'by the Way, small word at the start but within
quotes.'"""
text = titlecase(
"'by the Way, small word at the start but within quotes.'"
)
result = "'By the Way, Small Word at the Start but Within Quotes.'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_end(self):
""" Testing: Small word at end is nothing to be afraid of """
text = titlecase("Small word at end is nothing to be afraid of")
result = "Small Word at End Is Nothing to Be Afraid Of"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_sub_phrase_small_word(self):
""" Testing: Starting Sub-Phrase With a Small Word: a Trick,
Perhaps?
"""
text = titlecase(
"Starting Sub-Phrase With a Small Word: a Trick, Perhaps?"
)
result = "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quotes(self):
""" Testing: Sub-Phrase With a Small Word in Quotes: 'a Trick,
Perhaps?' """
text = titlecase(
"Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'"
)
result = "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_double_quotes(self):
""" Testing: Sub-Phrase With a Small Word in Quotes: \"a Trick,
Perhaps?\" """
text = titlecase(
'Sub-Phrase With a Small Word in Quotes: "a Trick, Perhaps?"'
)
result = 'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of(self):
""" Testing: \"Nothing to Be Afraid of?\" """
text = titlecase('"Nothing to Be Afraid of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of2(self):
""" Testing: \"Nothing to Be Afraid Of?\" """
text = titlecase('"Nothing to Be Afraid Of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_a_thing(self):
""" Testing: a thing """
text = titlecase('a thing')
result = 'A Thing'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_vapourware(self):
""" Testing: 2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware' """
text = titlecase(
"2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"
)
result = "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains(self):
""" Testing: this is just an example.com """
text = titlecase('this is just an example.com')
result = 'This Is Just an example.com'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains2(self):
""" Testing: this is something listed on an del.icio.us """
text = titlecase('this is something listed on del.icio.us')
result = 'This Is Something Listed on del.icio.us'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_itunes(self):
""" Testing: iTunes should be unmolested """
text = titlecase('iTunes should be unmolested')
result = 'iTunes Should Be Unmolested'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_thoughts_on_music(self):
""" Testing: Reading Between the Lines of Steve Jobs’s ‘Thoughts on
Music’ """
text = titlecase(
'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’'
)
result = 'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_repair_perms(self):
""" Testing: Seriously, ‘Repair Permissions’ Is Voodoo """
text = titlecase('Seriously, ‘Repair Permissions’ Is Voodoo')
result = 'Seriously, ‘Repair Permissions’ Is Voodoo'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_generalissimo(self):
""" Testing: Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass """
text = titlecase(
'Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass'
)
result = 'Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
if __name__ == '__main__':
if not sys.stdin.isatty():
for line in sys.stdin:
try:
print(titlecase(line))
except:
pass
else:
suite = unittest.TestLoader().loadTestsFromTestCase(TitlecaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
tmilicic/networkx | networkx/algorithms/bipartite/tests/test_project.py | 14 | 14106 | #!/usr/bin/env python
from nose.tools import assert_equal
import networkx as nx
from networkx.algorithms import bipartite
from networkx.testing import assert_edges_equal, assert_nodes_equal
class TestBipartiteProject:
def test_path_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.projected_graph(G, [1, 3])
assert_nodes_equal(list(P), [1, 3])
assert_edges_equal(list(P.edges()), [(1, 3)])
P=bipartite.projected_graph(G, [0, 2])
assert_nodes_equal(list(P), [0, 2])
assert_edges_equal(list(P.edges()), [(0, 2)])
def test_path_projected_properties_graph(self):
G=nx.path_graph(4)
G.add_node(1,name='one')
G.add_node(2,name='two')
P=bipartite.projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
assert_equal(P.node[1]['name'],G.node[1]['name'])
P=bipartite.projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
assert_equal(P.node[2]['name'],G.node[2]['name'])
def test_path_collaboration_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_directed_path_collaboration_projected_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.collaboration_weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.collaboration_weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_graph(self):
G=nx.path_graph(4)
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_path_weighted_projected_directed_graph(self):
G=nx.DiGraph()
G.add_path(list(range(4)))
P=bipartite.weighted_projected_graph(G,[1,3])
assert_nodes_equal(list(P),[1,3])
assert_edges_equal(list(P.edges()),[(1,3)])
P[1][3]['weight']=1
P=bipartite.weighted_projected_graph(G,[0,2])
assert_nodes_equal(list(P),[0,2])
assert_edges_equal(list(P.edges()),[(0,2)])
P[0][2]['weight']=1
def test_star_projected_graph(self):
G=nx.star_graph(3)
P=bipartite.projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.weighted_projected_graph(G,[1,2,3])
assert_nodes_equal(list(P),[1,2,3])
assert_edges_equal(list(P.edges()),[(1,2),(1,3),(2,3)])
P=bipartite.projected_graph(G,[0])
assert_nodes_equal(list(P),[0])
assert_edges_equal(list(P.edges()),[])
def test_project_multigraph(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('a',2)
G.add_edge('b',2)
P=bipartite.projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.weighted_projected_graph(G,'ab')
assert_edges_equal(list(P.edges()),[('a','b')])
P=bipartite.projected_graph(G,'ab',multigraph=True)
assert_edges_equal(list(P.edges()),[('a','b'),('a','b')])
def test_project_collaboration(self):
G=nx.Graph()
G.add_edge('a',1)
G.add_edge('b',1)
G.add_edge('b',2)
G.add_edge('c',2)
G.add_edge('c',3)
G.add_edge('c',4)
G.add_edge('b',4)
P=bipartite.collaboration_weighted_projected_graph(G,'abc')
assert_equal(P['a']['b']['weight'],1)
assert_equal(P['b']['c']['weight'],2)
def test_directed_projection(self):
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge('B',2)
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],1)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B')])
G=nx.DiGraph()
G.add_edge('A',1)
G.add_edge(1,'B')
G.add_edge('A',2)
G.add_edge(2,'B')
P=bipartite.projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
P=bipartite.weighted_projected_graph(G,'AB')
assert_edges_equal(list(P.edges()),[('A','B')])
assert_equal(P['A']['B']['weight'],2)
P=bipartite.projected_graph(G,'AB',multigraph=True)
assert_edges_equal(list(P.edges()),[('A','B'),('A','B')])
class TestBipartiteWeightedProjection:
def setUp(self):
# Tore Opsahl's example
# http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
self.G=nx.Graph()
self.G.add_edge('A',1)
self.G.add_edge('A',2)
self.G.add_edge('B',1)
self.G.add_edge('B',2)
self.G.add_edge('B',3)
self.G.add_edge('B',4)
self.G.add_edge('B',5)
self.G.add_edge('C',1)
self.G.add_edge('D',3)
self.G.add_edge('E',4)
self.G.add_edge('E',5)
self.G.add_edge('E',6)
self.G.add_edge('F',6)
# Graph based on figure 6 from Newman (2001)
self.N=nx.Graph()
self.N.add_edge('A',1)
self.N.add_edge('A',2)
self.N.add_edge('A',3)
self.N.add_edge('B',1)
self.N.add_edge('B',2)
self.N.add_edge('B',3)
self.N.add_edge('C',1)
self.N.add_edge('D',1)
self.N.add_edge('E',3)
def test_project_weighted_shared(self):
edges=[('A','B',2),
('A','C',1),
('B','C',1),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3),
('A','E',1),
('A','C',1),
('A','D',1),
('B','E',1),
('B','C',1),
('B','D',1),
('C','D',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_newman(self):
edges=[('A','B',1.5),
('A','C',0.5),
('B','C',0.5),
('B','D',1),
('B','E',2),
('E','F',1)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',11/6.0),
('A','E',1/2.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/2.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.collaboration_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_ratio(self):
edges=[('A','B',2/6.0),
('A','C',1/6.0),
('B','C',1/6.0),
('B','D',1/6.0),
('B','E',2/6.0),
('E','F',1/6.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.G, 'ABCDEF', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.weighted_projected_graph(self.N, 'ABCDE', ratio=True)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_overlap(self):
edges=[('A','B',2/2.0),
('A','C',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('B','E',2/3.0),
('E','F',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/1.0),
('A','C',1/1.0),
('A','D',1/1.0),
('B','E',1/1.0),
('B','C',1/1.0),
('B','D',1/1.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE', jaccard=False)
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_project_weighted_jaccard(self):
edges=[('A','B',2/5.0),
('A','C',1/2.0),
('B','C',1/5.0),
('B','D',1/5.0),
('B','E',2/6.0),
('E','F',1/3.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.G,'ABCDEF')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in list(P.edges()):
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
edges=[('A','B',3/3.0),
('A','E',1/3.0),
('A','C',1/3.0),
('A','D',1/3.0),
('B','E',1/3.0),
('B','C',1/3.0),
('B','D',1/3.0),
('C','D',1/1.0)]
Panswer=nx.Graph()
Panswer.add_weighted_edges_from(edges)
P=bipartite.overlap_weighted_projected_graph(self.N,'ABCDE')
assert_edges_equal(list(P.edges()),Panswer.edges())
for u,v in P.edges():
assert_equal(P[u][v]['weight'],Panswer[u][v]['weight'])
def test_generic_weighted_projected_graph_simple(self):
def shared(G, u, v):
return len(set(G[u]) & set(G[v]))
B = nx.path_graph(5)
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(list(G.edges(data=True))),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
B = nx.DiGraph()
B.add_path(list(range(5)))
G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
assert_nodes_equal(list(G), [0, 2, 4])
assert_edges_equal(list(G.edges(data=True)),
[(0, 2, {'weight': 1}), (2, 4, {'weight': 1})] )
def test_generic_weighted_projected_graph_custom(self):
def jaccard(G, u, v):
unbrs = set(G[u])
vnbrs = set(G[v])
return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
def my_weight(G, u, v, weight='weight'):
w = 0
for nbr in set(G[u]) & set(G[v]):
w += G.edge[u][nbr].get(weight, 1) + G.edge[v][nbr].get(weight, 1)
return w
B = nx.bipartite.complete_bipartite_graph(2, 2)
for i,(u,v) in enumerate(B.edges()):
B.edge[u][v]['weight'] = i + 1
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=jaccard)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 1.0})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1],
weight_function=my_weight)
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 10})])
G = bipartite.generic_weighted_projected_graph(B, [0, 1])
assert_edges_equal(list(G.edges(data=True)), [(0, 1, {'weight': 2})])
| bsd-3-clause |
cecep-edu/edx-platform | common/lib/capa/capa/util.py | 47 | 7335 | """
Utility functions for capa.
"""
import bleach
from decimal import Decimal
from calc import evaluator
from cmath import isinf, isnan
import re
from lxml import etree
#-----------------------------------------------------------------------------
#
# Utility functions used in CAPA responsetypes
default_tolerance = '0.001%'
def compare_with_tolerance(student_complex, instructor_complex, tolerance=default_tolerance, relative_tolerance=False):
"""
Compare student_complex to instructor_complex with maximum tolerance tolerance.
- student_complex : student result (float complex number)
- instructor_complex : instructor result (float complex number)
- tolerance : float, or string (representing a float or a percentage)
- relative_tolerance: bool, to explicitly use passed tolerance as relative
Note: when a tolerance is a percentage (i.e. '10%'), it will compute that
percentage of the instructor result and yield a number.
If relative_tolerance is set to False, it will use that value and the
instructor result to define the bounds of valid student result:
instructor_complex = 10, tolerance = '10%' will give [9.0, 11.0].
If relative_tolerance is set to True, it will use that value and both
instructor result and student result to define the bounds of valid student
result:
instructor_complex = 10, student_complex = 20, tolerance = '10%' will give
[8.0, 12.0].
This is typically used internally to compare floats, with a
default_tolerance = '0.001%'.
Default tolerance of 1e-3% is added to compare two floats for
near-equality (to handle machine representation errors).
Default tolerance is relative, as the acceptable difference between two
floats depends on the magnitude of the floats.
(http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/)
Examples:
In [183]: 0.000016 - 1.6*10**-5
Out[183]: -3.3881317890172014e-21
In [212]: 1.9e24 - 1.9*10**24
Out[212]: 268435456.0
"""
if isinstance(tolerance, str):
if tolerance == default_tolerance:
relative_tolerance = True
if tolerance.endswith('%'):
tolerance = evaluator(dict(), dict(), tolerance[:-1]) * 0.01
if not relative_tolerance:
tolerance = tolerance * abs(instructor_complex)
else:
tolerance = evaluator(dict(), dict(), tolerance)
if relative_tolerance:
tolerance = tolerance * max(abs(student_complex), abs(instructor_complex))
if isinf(student_complex) or isinf(instructor_complex):
# If an input is infinite, we can end up with `abs(student_complex-instructor_complex)` and
# `tolerance` both equal to infinity. Then, below we would have
# `inf <= inf` which is a fail. Instead, compare directly.
return student_complex == instructor_complex
# because student_complex and instructor_complex are not necessarily
# complex here, we enforce it here:
student_complex = complex(student_complex)
instructor_complex = complex(instructor_complex)
# if both the instructor and student input are real,
# compare them as Decimals to avoid rounding errors
if not (instructor_complex.imag or student_complex.imag):
# if either of these are not a number, short circuit and return False
if isnan(instructor_complex.real) or isnan(student_complex.real):
return False
student_decimal = Decimal(str(student_complex.real))
instructor_decimal = Decimal(str(instructor_complex.real))
tolerance_decimal = Decimal(str(tolerance))
return abs(student_decimal - instructor_decimal) <= tolerance_decimal
else:
# v1 and v2 are, in general, complex numbers:
# there are some notes about backward compatibility issue: see responsetypes.get_staff_ans()).
return abs(student_complex - instructor_complex) <= tolerance
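# A minimal usage sketch (values worked through by hand from the logic above,
# not taken from the upstream tests), assuming this module is importable:
#
#   compare_with_tolerance(10.5, 10, '10%')  # tolerance -> 0.1 * |10| = 1.0; |10.5 - 10| = 0.5 <= 1.0 -> True
#   compare_with_tolerance(11.5, 10, '10%')  # |11.5 - 10| = 1.5 > 1.0 -> False
#   compare_with_tolerance(20, 10, '10%', relative_tolerance=True)
#       # tolerance -> 0.1 * max(|20|, |10|) = 2.0; |20 - 10| = 10 > 2.0 -> False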
def contextualize_text(text, context): # private
"""
Takes a string with variables. E.g. $a+$b.
Does a substitution of those variables from the context
"""
if not text:
return text
for key in sorted(context, lambda x, y: cmp(len(y), len(x))):
# TODO (vshnayder): This whole replacement thing is a big hack
# right now--context contains not just the vars defined in the
# program, but also e.g. a reference to the numpy module.
# Should be a separate dict of variables that should be
# replaced.
if '$' + key in text:
try:
s = str(context[key])
except UnicodeEncodeError:
s = context[key].encode('utf8', errors='ignore')
text = text.replace('$' + key, s)
return text
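# A small illustrative example (hypothetical values, not from the original file):
#
#   contextualize_text('$a + $b', {'a': 1, 'b': 2})   # -> '1 + 2'
#
# Keys are substituted longest-first, so a '$total' variable is replaced before a
# shorter '$t' variable can clobber part of it.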
def convert_files_to_filenames(answers):
"""
Check for File objects in the dict of submitted answers,
convert File objects to their filename (string)
"""
new_answers = dict()
for answer_id in answers.keys():
answer = answers[answer_id]
# Files are stored as a list, even if one file
if is_list_of_files(answer):
new_answers[answer_id] = [f.name for f in answer]
else:
new_answers[answer_id] = answers[answer_id]
return new_answers
def is_list_of_files(files):
return isinstance(files, list) and all(is_file(f) for f in files)
def is_file(file_to_test):
"""
Duck typing to check if 'file_to_test' is a File object
"""
return all(hasattr(file_to_test, method) for method in ['read', 'name'])
def find_with_default(node, path, default):
"""
Look for a child of node using the given xpath, and return its text if found.
Otherwise returns default.
Arguments:
node: lxml node
path: xpath search expression
default: value to return if nothing found
Returns:
node.find(path).text if the find succeeds, default otherwise.
"""
v = node.find(path)
if v is not None:
return v.text
else:
return default
def sanitize_html(html_code):
"""
Sanitize html_code for safe embed on LMS pages.
Used to sanitize XQueue responses from Matlab.
"""
attributes = bleach.ALLOWED_ATTRIBUTES.copy()
# Yuck! but bleach does not offer the option of passing in allowed_protocols,
# and matlab uses data urls for images
if u'data' not in bleach.BleachSanitizer.allowed_protocols:
bleach.BleachSanitizer.allowed_protocols.append(u'data')
attributes.update({
'*': ['class', 'style', 'id'],
'audio': ['controls', 'autobuffer', 'autoplay', 'src'],
'img': ['src', 'width', 'height', 'class']
})
output = bleach.clean(
html_code,
tags=bleach.ALLOWED_TAGS + ['div', 'p', 'audio', 'pre', 'img', 'span'],
styles=['white-space'],
attributes=attributes
)
return output
def get_inner_html_from_xpath(xpath_node):
"""
Returns inner html as string from xpath node.
"""
# returns string from xpath node
html = etree.tostring(xpath_node).strip()
# strips outer tag from html string
inner_html = re.sub('(?ms)<%s[^>]*>(.*)</%s>' % (xpath_node.tag, xpath_node.tag), '\\1', html)
return inner_html.strip()
| agpl-3.0 |
paul-rs/amaas-core-sdk-python | amaascore/assets/synthetic_multi_leg.py | 3 | 2129 | from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import date
from amaascore.assets.synthetic import Synthetic
class SyntheticMultiLeg(Synthetic):
"""
A synthetic asset which takes multiple assets as 'legs'. The value of the entire structure is equal to the sum of
the legs.
"""
def __init__(self, asset_id, asset_manager_id, legs=None, asset_issuer_id=None, asset_status='Active',
country_id=None, currency=None, display_name='', description='', fungible=True, issue_date=date.min,
maturity_date=date.max, comments=None, links=None, references=None, *args, **kwargs):
self.legs = legs
super(SyntheticMultiLeg, self).__init__(asset_manager_id=asset_manager_id, asset_id=asset_id,
fungible=fungible, asset_issuer_id=asset_issuer_id,
asset_status=asset_status, currency=currency,
issue_date=issue_date, maturity_date=maturity_date,
country_id=country_id, roll_price=False,
display_name=display_name, description=description,
comments=comments, links=links, references=references,
*args, **kwargs)
@property
def legs(self):
if hasattr(self, '_legs'):
return self._legs
@legs.setter
def legs(self, legs):
"""
A list of dictionaries of the legs that make up the multi-legged asset.
Format is {'asset_id': XYZ, 'quantity': ABC_Decimal}
:param legs:
:return:
"""
if legs is not None:
if not isinstance(legs, list):
raise ValueError("Invalid type for asset legs: %s" % type(legs).__name__)
if not all([isinstance(leg, dict) for leg in legs]):
raise ValueError("All asset legs must be dictionaries")
self._legs = legs
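# A minimal construction sketch, assuming the amaascore package is installed; the
# asset identifiers and quantities below are made-up placeholder values.
#
#   from decimal import Decimal
#   basket = SyntheticMultiLeg(
#       asset_id='BASKET_1', asset_manager_id=1, currency='USD',
#       legs=[{'asset_id': 'LEG_A', 'quantity': Decimal('2')},
#             {'asset_id': 'LEG_B', 'quantity': Decimal('1')}])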
| apache-2.0 |
RominYue/sourceCode | CS231n/assignment1/cs231n/vis_utils.py | 65 | 1951 | from math import sqrt, ceil
import numpy as np
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in xrange(grid_size):
x0, x1 = 0, W
for x in xrange(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
# grid[y0:y1, x0:x1] = Xs[next_idx]
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
# grid_max = np.max(grid)
# grid_min = np.min(grid)
# grid = ubound * (grid - grid_min) / (grid_max - grid_min)
return grid
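# A small usage sketch with hypothetical data; the grid shape follows from the
# arithmetic above (grid_size = ceil(sqrt(N)), plus `padding` pixels between cells).
#
#   Xs = np.random.rand(9, 32, 32, 3)      # nine 32x32 RGB images -> 3x3 grid
#   grid = visualize_grid(Xs, padding=1)   # grid.shape == (98, 98, 3)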
def vis_grid(Xs):
""" visualize a grid of images """
(N, H, W, C) = Xs.shape
A = int(ceil(sqrt(N)))
G = np.ones((A*H+A, A*W+A, C), Xs.dtype)
G *= np.min(Xs)
n = 0
for y in range(A):
for x in range(A):
if n < N:
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = Xs[n,:,:,:]
n += 1
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
def vis_nn(rows):
""" visualize array of arrays of images """
N = len(rows)
D = len(rows[0])
H,W,C = rows[0][0].shape
Xs = rows[0][0]
G = np.ones((N*H+N, D*W+D, C), Xs.dtype)
for y in range(N):
for x in range(D):
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = rows[y][x]
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
| gpl-3.0 |
jwngr/sdow | scripts/replace_titles_in_redirects_file.py | 1 | 2190 | """
Replaces page titles in the redirects file with their corresponding IDs.
Output is written to stdout.
"""
from __future__ import print_function
import io
import sys
import gzip
from sets import Set
# Validate input arguments.
if len(sys.argv) < 3:
print('[ERROR] Not enough arguments provided!')
print('[INFO] Usage: {0} <pages_file> <redirects_file>'.format(sys.argv[0]))
sys.exit()
PAGES_FILE = sys.argv[1]
REDIRECTS_FILE = sys.argv[2]
if not PAGES_FILE.endswith('.gz'):
print('[ERROR] Pages file must be gzipped.')
sys.exit()
if not REDIRECTS_FILE.endswith('.gz'):
print('[ERROR] Redirects file must be gzipped.')
sys.exit()
# Create a set of all page IDs and a dictionary of page titles to their corresponding IDs.
ALL_PAGE_IDS = Set()
PAGE_TITLES_TO_IDS = {}
for line in io.BufferedReader(gzip.open(PAGES_FILE, 'r')):
[page_id, page_title, _] = line.rstrip('\n').split('\t')
ALL_PAGE_IDS.add(page_id)
PAGE_TITLES_TO_IDS[page_title] = page_id
# Create a dictionary of redirects, replace page titles in the redirects file with their
# corresponding IDs and ignoring pages which do not exist.
REDIRECTS = {}
for line in io.BufferedReader(gzip.open(REDIRECTS_FILE, 'r')):
[source_page_id, target_page_title] = line.rstrip('\n').split('\t')
source_page_exists = source_page_id in ALL_PAGE_IDS
target_page_id = PAGE_TITLES_TO_IDS.get(target_page_title)
if source_page_exists and target_page_id is not None:
REDIRECTS[source_page_id] = target_page_id
# Loop through the redirects dictionary and remove redirects which redirect to another redirect,
# writing the remaining redirects to stdout.
for source_page_id, target_page_id in REDIRECTS.iteritems():
start_target_page_id = target_page_id
redirected_count = 0
while target_page_id in REDIRECTS:
target_page_id = REDIRECTS[target_page_id]
redirected_count += 1
# Break out if there is a circular path, meaning the redirects only point to other redirects,
# not an actual page.
if target_page_id == start_target_page_id or redirected_count > 100:
target_page_id = None
if target_page_id is not None:
print('\t'.join([source_page_id, target_page_id]))
| mit |
CSC301H-Fall2013/JuakStore | site-packages/build/lib/django/contrib/databrowse/plugins/calendars.py | 100 | 5855 | from __future__ import unicode_literals
from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.contrib.databrowse.sites import DatabrowsePlugin
from django.shortcuts import render_to_response
from django.utils.html import format_html, format_html_join
from django.utils.text import capfirst
from django.utils.encoding import force_text
from django.views.generic import dates
from django.utils import datetime_safe
class DateViewMixin(object):
allow_empty = False
allow_future = True
root_url = None
model = None
field = None
def get_context_data(self, **kwargs):
context = super(DateViewMixin, self).get_context_data(**kwargs)
context.update({
'root_url': self.root_url,
'model': self.model,
'field': self.field
})
return context
class DayView(DateViewMixin, dates.DayArchiveView):
template_name = 'databrowse/calendar_day.html'
class MonthView(DateViewMixin, dates.MonthArchiveView):
template_name = 'databrowse/calendar_month.html'
class YearView(DateViewMixin, dates.YearArchiveView):
template_name = 'databrowse/calendar_year.html'
class IndexView(DateViewMixin, dates.ArchiveIndexView):
template_name = 'databrowse/calendar_main.html'
class CalendarPlugin(DatabrowsePlugin):
def __init__(self, field_names=None):
self.field_names = field_names
def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set, it takes
that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)])
else:
return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])
def model_index_html(self, request, model, site):
fields = self.field_dict(model)
if not fields:
return ''
return format_html('<p class="filter"><strong>View calendar by:</strong> {0}</p>',
format_html_join(', ', '<a href="calendars/{0}/">{1}</a>',
((f.name, force_text(capfirst(f.verbose_name))) for f in fields.values())))
def urls(self, plugin_name, easy_instance_field):
if isinstance(easy_instance_field.field, models.DateField):
d = easy_instance_field.raw_value
return ['%s%s/%s/%s/%s/%s/' % (
easy_instance_field.model.url(),
plugin_name, easy_instance_field.field.name,
str(d.year),
datetime_safe.new_date(d).strftime('%b').lower(),
d.day)]
def model_view(self, request, model_databrowse, url):
self.model, self.site = model_databrowse.model, model_databrowse.site
self.fields = self.field_dict(self.model)
# If the model has no DateFields, there's no point in going further.
if not self.fields:
raise http.Http404('The requested model has no calendars.')
if url is None:
return self.homepage_view(request)
url_bits = url.split('/')
if url_bits[0] in self.fields:
return self.calendar_view(request, self.fields[url_bits[0]], *url_bits[1:])
raise http.Http404('The requested page does not exist.')
def homepage_view(self, request):
easy_model = EasyModel(self.site, self.model)
field_list = list(self.fields.values())
field_list.sort(key=lambda k:k.verbose_name)
return render_to_response('databrowse/calendar_homepage.html', {
'root_url': self.site.root_url,
'model': easy_model,
'field_list': field_list
})
def calendar_view(self, request, field, year=None, month=None, day=None):
easy_model = EasyModel(self.site, self.model)
root_url = self.site.root_url
if day is not None:
return DayView.as_view(
year=year, month=month, day=day,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif month is not None:
return MonthView.as_view(
year=year, month=month,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
elif year is not None:
return YearView.as_view(
year=year,
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
else:
return IndexView.as_view(
date_field=field.name,
queryset=easy_model.get_query_set(),
root_url=root_url,
model=easy_model,
field=field
)(request)
assert False, ('%s, %s, %s, %s' % (field, year, month, day))
| mit |
trondeau/gnuradio | gr-digital/python/digital/ofdm_txrx.py | 27 | 20975 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
OFDM Transmitter / Receiver hier blocks.
For simple configurations, no need to connect all the relevant OFDM blocks
to form an OFDM Tx/Rx--simply use these.
"""
# Reminder: All frequency-domain stuff is in shifted form, i.e. DC carrier
# in the middle!
import numpy
from gnuradio import gr
import digital_swig as digital
from utils import tagged_streams
try:
# This will work when feature #505 is added.
from gnuradio import fft
from gnuradio import blocks
from gnuradio import analog
except ImportError:
# Until then this will work.
import fft_swig as fft
import blocks_swig as blocks
import analog_swig as analog
_def_fft_len = 64
_def_cp_len = 16
_def_frame_length_tag_key = "frame_length"
_def_packet_length_tag_key = "packet_length"
_def_packet_num_tag_key = "packet_num"
# Data and pilot carriers are same as in 802.11a
_def_occupied_carriers = (range(-26, -21) + range(-20, -7) + range(-6, 0) + range(1, 7) + range(8, 21) + range(22, 27),)
_def_pilot_carriers=((-21, -7, 7, 21,),)
_pilot_sym_scramble_seq = (
1,1,1,1, -1,-1,-1,1, -1,-1,-1,-1, 1,1,-1,1, -1,-1,1,1, -1,1,1,-1, 1,1,1,1, 1,1,-1,1,
1,1,-1,1, 1,-1,-1,1, 1,1,-1,1, -1,-1,-1,1, -1,1,-1,-1, 1,-1,-1,1, 1,1,1,1, -1,-1,1,1,
-1,-1,1,-1, 1,-1,1,1, -1,-1,-1,1, 1,-1,-1,-1, -1,1,-1,-1, 1,-1,1,1, 1,1,-1,1, -1,1,-1,1,
-1,-1,-1,-1, -1,1,-1,1, 1,-1,1,-1, 1,1,1,-1, -1,1,-1,-1, -1,1,1,1, -1,-1,-1,-1, -1,-1,-1
)
_def_pilot_symbols= tuple([(x, x, x, -x) for x in _pilot_sym_scramble_seq])
_seq_seed = 42
def _get_active_carriers(fft_len, occupied_carriers, pilot_carriers):
""" Returns a list of all carriers that at some point carry data or pilots. """
active_carriers = list()
for carrier in list(occupied_carriers[0]) + list(pilot_carriers[0]):
if carrier < 0:
carrier += fft_len
active_carriers.append(carrier)
return active_carriers
def _make_sync_word1(fft_len, occupied_carriers, pilot_carriers):
""" Creates a random sync sequence for fine frequency offset and timing
estimation. This is the first of typically two sync preamble symbols
for the Schmidl & Cox sync algorithm.
The relevant feature of this symbols is that every second sub-carrier
is zero. In the time domain, this results in two identical halves of
the OFDM symbols.
Symbols are always BPSK symbols. Carriers are scaled by sqrt(2) to keep
total energy constant.
Carrier 0 (DC carrier) is always zero. If used, carrier 1 is non-zero.
This means the sync algorithm has to check on odd carriers!
"""
active_carriers = _get_active_carriers(fft_len, occupied_carriers, pilot_carriers)
numpy.random.seed(_seq_seed)
bpsk = {0: numpy.sqrt(2), 1: -numpy.sqrt(2)}
sw1 = [bpsk[numpy.random.randint(2)] if x in active_carriers and x % 2 else 0 for x in range(fft_len)]
return numpy.fft.fftshift(sw1)
def _make_sync_word2(fft_len, occupied_carriers, pilot_carriers):
""" Creates a random sync sequence for coarse frequency offset and channel
estimation. This is the second of typically two sync preamble symbols
for the Schmidl & Cox sync algorithm.
Symbols are always BPSK symbols.
"""
active_carriers = _get_active_carriers(fft_len, occupied_carriers, pilot_carriers)
numpy.random.seed(_seq_seed)
bpsk = {0: 1, 1: -1}
sw2 = [bpsk[numpy.random.randint(2)] if x in active_carriers else 0 for x in range(fft_len)]
sw2[0] = 0j
return numpy.fft.fftshift(sw2)
def _get_constellation(bps):
""" Returns a modulator block for a given number of bits per symbol """
constellation = {
1: digital.constellation_bpsk(),
2: digital.constellation_qpsk(),
3: digital.constellation_8psk()
}
try:
return constellation[bps]
except KeyError:
print 'Modulation not supported.'
exit(1)
class ofdm_tx(gr.hier_block2):
"""Hierarchical block for OFDM modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
Args:
fft_len: The length of FFT (integer).
cp_len: The length of cyclic prefix in total samples (integer).
packet_length_tag_key: The name of the tag giving packet length at the input.
occupied_carriers: A vector of vectors describing which OFDM carriers are occupied.
pilot_carriers: A vector of vectors describing which OFDM carriers are occupied with pilot symbols.
pilot_symbols: The pilot symbols.
bps_header: Bits per symbol (header).
bps_payload: Bits per symbol (payload).
sync_word1: The first sync preamble symbol. This has to be with
| zeros on alternating carriers. Used for fine and
| coarse frequency offset and timing estimation.
sync_word2: The second sync preamble symbol. This has to be filled
| entirely. Also used for coarse frequency offset and
| channel estimation.
rolloff: The rolloff length in samples. Must be smaller than the CP.
debug_log: Write output into log files (Warning: creates lots of data!)
scramble_bits: Activates the scramblers (set this to True unless debugging)
"""
def __init__(self, fft_len=_def_fft_len, cp_len=_def_cp_len,
packet_length_tag_key=_def_packet_length_tag_key,
occupied_carriers=_def_occupied_carriers,
pilot_carriers=_def_pilot_carriers,
pilot_symbols=_def_pilot_symbols,
bps_header=1,
bps_payload=1,
sync_word1=None,
sync_word2=None,
rolloff=0,
debug_log=False,
scramble_bits=False
):
gr.hier_block2.__init__(self, "ofdm_tx",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
### Param init / sanity check ########################################
self.fft_len = fft_len
self.cp_len = cp_len
self.packet_length_tag_key = packet_length_tag_key
self.occupied_carriers = occupied_carriers
self.pilot_carriers = pilot_carriers
self.pilot_symbols = pilot_symbols
self.bps_header = bps_header
self.bps_payload = bps_payload
self.sync_word1 = sync_word1
if sync_word1 is None:
self.sync_word1 = _make_sync_word1(fft_len, occupied_carriers, pilot_carriers)
else:
if len(sync_word1) != self.fft_len:
raise ValueError("Length of sync sequence(s) must be FFT length.")
self.sync_words = [self.sync_word1,]
if sync_word2 is None:
self.sync_word2 = _make_sync_word2(fft_len, occupied_carriers, pilot_carriers)
else:
self.sync_word2 = sync_word2
if len(self.sync_word2):
if len(self.sync_word2) != fft_len:
raise ValueError("Length of sync sequence(s) must be FFT length.")
self.sync_word2 = list(self.sync_word2)
self.sync_words.append(self.sync_word2)
if scramble_bits:
self.scramble_seed = 0x7f
else:
self.scramble_seed = 0x00 # We deactivate the scrambler by init'ing it with zeros
### Header modulation ################################################
crc = digital.crc32_bb(False, self.packet_length_tag_key)
header_constellation = _get_constellation(bps_header)
header_mod = digital.chunks_to_symbols_bc(header_constellation.points())
formatter_object = digital.packet_header_ofdm(
occupied_carriers=occupied_carriers, n_syms=1,
bits_per_header_sym=self.bps_header,
bits_per_payload_sym=self.bps_payload,
scramble_header=scramble_bits
)
header_gen = digital.packet_headergenerator_bb(formatter_object.base(), self.packet_length_tag_key)
header_payload_mux = blocks.tagged_stream_mux(
itemsize=gr.sizeof_gr_complex*1,
lengthtagname=self.packet_length_tag_key,
tag_preserve_head_pos=1 # Head tags on the payload stream stay on the head
)
self.connect(
self,
crc,
header_gen,
header_mod,
(header_payload_mux, 0)
)
if debug_log:
self.connect(header_gen, blocks.file_sink(1, 'tx-hdr.dat'))
### Payload modulation ###############################################
payload_constellation = _get_constellation(bps_payload)
payload_mod = digital.chunks_to_symbols_bc(payload_constellation.points())
payload_scrambler = digital.additive_scrambler_bb(
0x8a,
self.scramble_seed,
7,
0, # Don't reset after fixed length (let the reset tag do that)
bits_per_byte=8, # This is before unpacking
reset_tag_key=self.packet_length_tag_key
)
payload_unpack = blocks.repack_bits_bb(
8, # Unpack 8 bits per byte
bps_payload,
self.packet_length_tag_key
)
self.connect(
crc,
payload_scrambler,
payload_unpack,
payload_mod,
(header_payload_mux, 1)
)
### Create OFDM frame ################################################
allocator = digital.ofdm_carrier_allocator_cvc(
self.fft_len,
occupied_carriers=self.occupied_carriers,
pilot_carriers=self.pilot_carriers,
pilot_symbols=self.pilot_symbols,
sync_words=self.sync_words,
len_tag_key=self.packet_length_tag_key
)
ffter = fft.fft_vcc(
self.fft_len,
False, # Inverse FFT
(), # No window
True # Shift
)
cyclic_prefixer = digital.ofdm_cyclic_prefixer(
self.fft_len,
self.fft_len+self.cp_len,
rolloff,
self.packet_length_tag_key
)
self.connect(header_payload_mux, allocator, ffter, cyclic_prefixer, self)
if debug_log:
self.connect(allocator, blocks.file_sink(gr.sizeof_gr_complex * fft_len, 'tx-post-allocator.dat'))
self.connect(cyclic_prefixer, blocks.file_sink(gr.sizeof_gr_complex, 'tx-signal.dat'))
class ofdm_rx(gr.hier_block2):
"""Hierarchical block for OFDM demodulation.
The input is a complex baseband signal (e.g. from a UHD source).
The detected packets are output as a stream of packed bits on the output.
Args:
fft_len: The length of FFT (integer).
cp_len: The length of cyclic prefix in total samples (integer).
frame_length_tag_key: Used internally to tag the length of the OFDM frame.
packet_length_tag_key: The name of the tag giving packet length at the input.
occupied_carriers: A vector of vectors describing which OFDM carriers are occupied.
pilot_carriers: A vector of vectors describing which OFDM carriers are occupied with pilot symbols.
pilot_symbols: The pilot symbols.
bps_header: Bits per symbol (header).
bps_payload: Bits per symbol (payload).
sync_word1: The first sync preamble symbol. This has to be with
| zeros on alternating carriers. Used for fine and
| coarse frequency offset and timing estimation.
sync_word2: The second sync preamble symbol. This has to be filled
| entirely. Also used for coarse frequency offset and
| channel estimation.
"""
def __init__(self, fft_len=_def_fft_len, cp_len=_def_cp_len,
frame_length_tag_key=_def_frame_length_tag_key,
packet_length_tag_key=_def_packet_length_tag_key,
packet_num_tag_key=_def_packet_num_tag_key,
occupied_carriers=_def_occupied_carriers,
pilot_carriers=_def_pilot_carriers,
pilot_symbols=_def_pilot_symbols,
bps_header=1,
bps_payload=1,
sync_word1=None,
sync_word2=None,
debug_log=False,
scramble_bits=False
):
gr.hier_block2.__init__(self, "ofdm_rx",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(1, 1, gr.sizeof_char))
### Param init / sanity check ########################################
self.fft_len = fft_len
self.cp_len = cp_len
self.frame_length_tag_key = frame_length_tag_key
self.packet_length_tag_key = packet_length_tag_key
self.occupied_carriers = occupied_carriers
self.bps_header = bps_header
self.bps_payload = bps_payload
n_sync_words = 1
if sync_word1 is None:
self.sync_word1 = _make_sync_word1(fft_len, occupied_carriers, pilot_carriers)
else:
if len(sync_word1) != self.fft_len:
raise ValueError("Length of sync sequence(s) must be FFT length.")
self.sync_word1 = sync_word1
self.sync_word2 = ()
if sync_word2 is None:
self.sync_word2 = _make_sync_word2(fft_len, occupied_carriers, pilot_carriers)
n_sync_words = 2
elif len(sync_word2):
if len(sync_word2) != fft_len:
raise ValueError("Length of sync sequence(s) must be FFT length.")
self.sync_word2 = sync_word2
n_sync_words = 2
if scramble_bits:
self.scramble_seed = 0x7f
else:
self.scramble_seed = 0x00 # We deactivate the scrambler by init'ing it with zeros
### Sync ############################################################
sync_detect = digital.ofdm_sync_sc_cfb(fft_len, cp_len)
delay = blocks.delay(gr.sizeof_gr_complex, fft_len+cp_len)
oscillator = analog.frequency_modulator_fc(-2.0 / fft_len)
mixer = blocks.multiply_cc()
hpd = digital.header_payload_demux(
n_sync_words+1, # Number of OFDM symbols before payload (sync + 1 sym header)
fft_len, cp_len, # FFT length, guard interval
frame_length_tag_key, # Frame length tag key
"", # We're not using trigger tags
True # One output item is one OFDM symbol (False would output complex scalars)
)
self.connect(self, sync_detect)
self.connect(self, delay, (mixer, 0), (hpd, 0))
self.connect((sync_detect, 0), oscillator, (mixer, 1))
self.connect((sync_detect, 1), (hpd, 1))
if debug_log:
self.connect((sync_detect, 0), blocks.file_sink(gr.sizeof_float, 'freq-offset.dat'))
self.connect((sync_detect, 1), blocks.file_sink(gr.sizeof_char, 'sync-detect.dat'))
### Header demodulation ##############################################
header_fft = fft.fft_vcc(self.fft_len, True, (), True)
chanest = digital.ofdm_chanest_vcvc(self.sync_word1, self.sync_word2, 1)
header_constellation = _get_constellation(bps_header)
header_equalizer = digital.ofdm_equalizer_simpledfe(
fft_len,
header_constellation.base(),
occupied_carriers,
pilot_carriers,
pilot_symbols,
symbols_skipped=0,
)
header_eq = digital.ofdm_frame_equalizer_vcvc(
header_equalizer.base(),
cp_len,
self.frame_length_tag_key,
True,
1 # Header is 1 symbol long
)
header_serializer = digital.ofdm_serializer_vcc(
fft_len, occupied_carriers,
self.frame_length_tag_key
)
header_demod = digital.constellation_decoder_cb(header_constellation.base())
header_formatter = digital.packet_header_ofdm(
occupied_carriers, 1,
packet_length_tag_key,
frame_length_tag_key,
packet_num_tag_key,
bps_header,
bps_payload,
scramble_header=scramble_bits
)
header_parser = digital.packet_headerparser_b(header_formatter.formatter())
self.connect(
(hpd, 0),
header_fft,
chanest,
header_eq,
header_serializer,
header_demod,
header_parser
)
self.msg_connect(header_parser, "header_data", hpd, "header_data")
if debug_log:
self.connect((chanest, 1), blocks.file_sink(gr.sizeof_gr_complex * fft_len, 'channel-estimate.dat'))
self.connect((chanest, 0), blocks.file_sink(gr.sizeof_gr_complex * fft_len, 'post-hdr-chanest.dat'))
self.connect((chanest, 0), blocks.tag_debug(gr.sizeof_gr_complex * fft_len, 'post-hdr-chanest'))
self.connect(header_eq, blocks.file_sink(gr.sizeof_gr_complex * fft_len, 'post-hdr-eq.dat'))
self.connect(header_serializer, blocks.file_sink(gr.sizeof_gr_complex, 'post-hdr-serializer.dat'))
self.connect(header_demod, blocks.file_sink(1, 'post-hdr-demod.dat'))
### Payload demod ####################################################
payload_fft = fft.fft_vcc(self.fft_len, True, (), True)
payload_constellation = _get_constellation(bps_payload)
payload_equalizer = digital.ofdm_equalizer_simpledfe(
fft_len,
payload_constellation.base(),
occupied_carriers,
pilot_carriers,
pilot_symbols,
symbols_skipped=1, # (that was already in the header)
alpha=0.1
)
payload_eq = digital.ofdm_frame_equalizer_vcvc(
payload_equalizer.base(),
cp_len,
self.frame_length_tag_key
)
payload_serializer = digital.ofdm_serializer_vcc(
fft_len, occupied_carriers,
self.frame_length_tag_key,
self.packet_length_tag_key,
1 # Skip 1 symbol (that was already in the header)
)
payload_demod = digital.constellation_decoder_cb(payload_constellation.base())
self.payload_descrambler = digital.additive_scrambler_bb(
0x8a,
self.scramble_seed,
7,
0, # Don't reset after fixed length
bits_per_byte=8, # This is after packing
reset_tag_key=self.packet_length_tag_key
)
payload_pack = blocks.repack_bits_bb(bps_payload, 8, self.packet_length_tag_key, True)
self.crc = digital.crc32_bb(True, self.packet_length_tag_key)
self.connect(
(hpd, 1),
payload_fft,
payload_eq,
payload_serializer,
payload_demod,
payload_pack,
self.payload_descrambler,
self.crc,
self
)
if debug_log:
self.connect((hpd, 1), blocks.tag_debug(gr.sizeof_gr_complex*fft_len, 'post-hpd'))
self.connect(payload_fft, blocks.file_sink(gr.sizeof_gr_complex*fft_len, 'post-payload-fft.dat'))
self.connect(payload_eq, blocks.file_sink(gr.sizeof_gr_complex*fft_len, 'post-payload-eq.dat'))
self.connect(payload_serializer, blocks.file_sink(gr.sizeof_gr_complex, 'post-payload-serializer.dat'))
self.connect(payload_demod, blocks.file_sink(1, 'post-payload-demod.dat'))
self.connect(payload_pack, blocks.file_sink(1, 'post-payload-pack.dat'))
self.connect(self.crc, blocks.file_sink(1, 'post-payload-crc.dat'))
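# A minimal instantiation sketch (not part of the original file); both hier blocks
# use their documented defaults here, and any surrounding flowgraph wiring is assumed.
#
#   tx = ofdm_tx(fft_len=64, cp_len=16)
#   rx = ofdm_rx(fft_len=64, cp_len=16)
#   # tx expects a length-tagged byte stream and produces complex baseband samples;
#   # rx reverses the process and emits the recovered packed bytes.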
| gpl-3.0 |
OpenBazaar/Network | interfaces.py | 6 | 6090 | __author__ = 'chris'
from zope.interface import Interface, Attribute
class Multiplexer(Interface):
"""
This interface defines the structure of the protocol class that handles creating new network connections
and sending and receiving messages. At present this is only used by the OpenBazaarProtocol class which
is the protocol for our UDP server. In the future if we want to add additional transports, like I2P, we
they will need to implement this interface so as to not break the rest of the code.
"""
processors = Attribute("""A list of `MessageProcessors`""")
testnet = Attribute("""`bool` are we using testnet""")
vendors = Attribute("""A list `dht.node.Node` vendors""")
ws = Attribute("""The websocket API server""")
blockchain = Attribute("""The `LibbitcoinClient` instance""")
def register_processor(processor):
"""
A method to add a `MessageProcessor` to the processors attribute.
"""
def unregister_processor(processor):
"""
Remove a `MessageProcessor` from the processors list.
"""
def set_servers(ws, blockchain):
"""
Set the ws and blockchain attributes.
"""
def send_message(datagram, address, relay_addr):
"""
Send a message over the wire to the given address
Args:
datagram: the serialized message to send
address: the recipients address `tuple`
relay_addr: a replay address `tuple` if used, otherwise None
"""
def __getitem__(addr):
"""
Return the `Connection` of the given address.
Args:
addr: Tuple of destination address (ip, port).
Raises:
KeyError: No connection is handling the given address.
"""
class ConnectionHandler(Interface):
"""
A handler class for each connection.
"""
connection = Attribute("""a `Connection` object for this handler""")
node = Attribute("""a `dht.node.Node` object for the peer. This may be set after receiving the first message""")
processors = Attribute("""A list of `MessageProcessors`""")
def receive_message(datagram):
"""
Receive a datagram over the wire.
"""
def check_new_connection():
"""
Return True if this is the first time this is called else False
"""
class Connection(Interface):
"""
A class representing a connection to a remote peer
"""
handler = Attribute("""a `ConnectionHandler` object for this connection""")
state = Attribute("""a `txrudp.connection.State` enum showing this connection's state""")
def send_message(message):
"""
Send the serialized message to the remote peer.
"""
class MessageProcessor(Interface):
"""
This is an interface for processing messages coming off the wire. Classes that implement this interface should be
passed into 'OpenBazaarProtocol.register_processor' which will parse new messages to determine the message type
then route them to the correct processor.
"""
multiplexer = Attribute("""The main `ConnectionMultiplexer` protocol.
We pass it in here so we can send datagrams from this class.""")
def receive_message(datagram, sender, connection, ban_score):
"""
Called by OpenBazaarProtocol when it receives a new message intended for this processor.
Args:
datagram: The protobuf that came off the wire in unserialized format. Basic validity checks, such as
minimum size and valid protobuf format have already been done.
sender: a `node.Node` object sent by the sender.
connection: the txrudp connection to the peer who sent the message. To respond directly to the peer call
connection.send_message()
ban_score: a `net.dos.BanScore` object used to keep track of misbehaving peers. We need it here because
the processor determines if the incoming message is a request or a response before passing it into
the BanScore.
"""
def connect_multiplexer(multiplexer):
"""
Connect the main ConnectionMultiplexer to this class so we can send outgoing messages.
"""
def __iter__():
"""
OpenBazaarProtocol will use this to check which message types are handled by this processor.
:return: iter([list of enums])
"""
class BroadcastListener(Interface):
"""
An interface for handling broadcasts sent to followers.
"""
def notify(guid, message):
"""
New broadcasts will be sent here. They will only show if this node is following the node
which sent the broadcast.
"""
class MessageListener(Interface):
"""
An interface for handling messages sent between nodes.
"""
def notify(plaintext_message, signature):
"""
New messages will be sent here if they decrypt and parse correctly.
Args:
plaintext_message: the protobuf object containing the message
signature: the signature covering the message.
"""
class NotificationListener(Interface):
"""
An interface for handling event notifications. New events should update this
listener which will save the notifications to the db and push it to UI via websockets.
"""
def notify(guid, handle, type, order_id, title, image_hash):
"""
This should be called to register a new notification.
Args:
guid: (in hex) optional depending on notification type.
handle: optional depending on notification type.
type: a `String` containing the type of notification,
(ex: Follow, New Order, Order Confirmation, Payment Received).
order_id: an order id if this notification is for an order
title: a `String` which can be used for the item's title if an order notification.
image_hash: optional depending on notification type.
""" | mit |
ycasg/PyNLO | src/validation/pulse_test.py | 4 | 3953 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 10:19:43 2015
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: ycasg
"""
import unittest
import numpy as np
from pynlo.light.DerivedPulses import SechPulse, CWPulse
class SechPowerTest(unittest.TestCase):
TEST_PTS = 2**8
power = 3.2
fr = 2720.
EPP = power / (fr*1e6)
def setUp(self):
self.sech = SechPulse(power = self.power,
T0_ps = 0.0100,
center_wavelength_nm = 1064,
NPTS = self.TEST_PTS,
time_window_ps = 12.345,
frep_MHz = self.fr,
power_is_avg = True)
def tearDown(self):
pass
def test_wavelength_meters(self):
self.assertAlmostEqual(self.sech.calc_epp(), self.EPP)
class SechTest(unittest.TestCase):
TEST_PTS = 2**8
def setUp(self):
self.sech = SechPulse(power = 2.727,
T0_ps = 0.0100,
center_wavelength_nm = 1064,
NPTS = self.TEST_PTS,
time_window_ps = 12.345)
def tearDown(self):
pass
def test_wavelength_meters(self):
self.assertAlmostEqual(self.sech.wl_mks[int(self.sech.NPTS/2)], 1064.*1e-9)
def test_wavelength_nm(self):
self.assertAlmostEqual(self.sech.wl_nm[int(self.sech.NPTS/2)], 1064.)
def test_frequency_Hz(self):
self.assertAlmostEqual(self.sech.W_mks[int(self.sech.NPTS/2)] /\
( 2*np.pi*299792458/(1064.*1e-9)), 1.0)
def test_frequency_THz(self):
self.assertAlmostEqual(self.sech.W_THz[int(self.sech.NPTS/2)] /\
( 1e-12*2*np.pi*299792458/(1064.*1e-9)), 1.0)
def test_npts(self):
self.assertEqual(self.sech.NPTS, self.TEST_PTS)
self.assertEqual(self.sech._n, self.TEST_PTS)
def test_timewindow(self):
self.assertAlmostEqual(self.sech.time_window_ps, 12.345)
self.assertAlmostEqual(self.sech.time_window_mks, 12.345e-12)
def test_timeaxis(self):
self.assertAlmostEqual(self.sech.T_ps[-1] - self.sech.T_ps[0], 12.345,1)
self.assertAlmostEqual(self.sech.T_mks[-1] - self.sech.T_mks[0], 12.345e-12,1)
def test_temporal_peak(self):
self.assertAlmostEqual(np.max(np.abs(self.sech.AT))**2.0/ 2.727, 1, 2)
def test_temporal_width(self):
Tfwhm = 2.0*np.arccosh( np.sqrt(2.0)) * 0.0100
half_max = 0.5*np.max(np.abs(self.sech.AT)**2)
T1 = max(self.sech.T_ps[np.abs(self.sech.AT)**2 >= half_max])
T2 = min(self.sech.T_ps[np.abs(self.sech.AT)**2 >= half_max])
self.assertTrue( abs( (T1-T2) - Tfwhm) < 2*self.sech.dT_ps)
class CWTest(unittest.TestCase):
TEST_PTS = 2**8
def setUp(self):
self.cw = CWPulse(1, 1550, NPTS = self.TEST_PTS)
def tearDown(self):
pass
def test_wavelength_meters(self):
center_wl = self.cw.wl_nm[np.argmax(abs(self.cw.AW))]
self.assertAlmostEqual(center_wl, 1550)
if __name__ == '__main__':
unittest.main() | gpl-3.0 |
jetskijoe/SickGear | lib/hachoir_metadata/misc.py | 57 | 8595 | from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_metadata.safe import fault_tolerant
from hachoir_parser.container import SwfFile
from hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile
from hachoir_core.field import isString
from hachoir_core.error import warning
from hachoir_parser import guessParser
from hachoir_metadata.setter import normalizeString
class TorrentMetadata(RootMetadata):
KEY_TO_ATTR = {
u"announce": "url",
u"comment": "comment",
u"creation_date": "creation_date",
}
INFO_TO_ATTR = {
u"length": "file_size",
u"name": "filename",
}
def extract(self, torrent):
for field in torrent[0]:
self.processRoot(field)
@fault_tolerant
def processRoot(self, field):
if field.name in self.KEY_TO_ATTR:
key = self.KEY_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "info" and "value" in field:
for field in field["value"]:
self.processInfo(field)
@fault_tolerant
def processInfo(self, field):
if field.name in self.INFO_TO_ATTR:
key = self.INFO_TO_ATTR[field.name]
value = field.value
setattr(self, key, value)
elif field.name == "piece_length":
self.comment = "Piece length: %s" % field.display
class TTF_Metadata(RootMetadata):
NAMEID_TO_ATTR = {
0: "copyright", # Copyright notice
3: "title", # Unique font identifier
5: "version", # Version string
8: "author", # Manufacturer name
11: "url", # URL Vendor
14: "copyright", # License info URL
}
def extract(self, ttf):
if "header" in ttf:
self.extractHeader(ttf["header"])
if "names" in ttf:
self.extractNames(ttf["names"])
@fault_tolerant
def extractHeader(self, header):
self.creation_date = header["created"].value
self.last_modification = header["modified"].value
self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value
self.comment = u"Font direction: %s" % header["font_dir"].display
@fault_tolerant
def extractNames(self, names):
offset = names["offset"].value
for header in names.array("header"):
key = header["nameID"].value
foffset = offset + header["offset"].value
field = names.getFieldByAddress(foffset*8)
if not field or not isString(field):
continue
value = field.value
if key not in self.NAMEID_TO_ATTR:
continue
key = self.NAMEID_TO_ATTR[key]
if key == "version" and value.startswith(u"Version "):
# "Version 1.2" => "1.2"
value = value[8:]
setattr(self, key, value)
class OLE2_Metadata(RootMetadata):
SUMMARY_ID_TO_ATTR = {
2: "title", # Title
3: "title", # Subject
4: "author",
6: "comment",
8: "author", # Last saved by
12: "creation_date",
13: "last_modification",
14: "nb_page",
18: "producer",
}
IGNORE_SUMMARY = set((
1, # Code page
))
DOC_SUMMARY_ID_TO_ATTR = {
3: "title", # Subject
14: "author", # Manager
}
IGNORE_DOC_SUMMARY = set((
1, # Code page
))
def extract(self, ole2):
self._extract(ole2)
def _extract(self, fieldset):
try:
fieldset._feedAll()
except StopIteration:
pass
if "root[0]" in fieldset:
self._extract(self.getFragment(fieldset["root[0]"]))
doc_summary = self.getField(fieldset, "doc_summary[0]")
if doc_summary:
self.useSummary(doc_summary, True)
word_doc = self.getField(fieldset, "word_doc[0]")
if word_doc:
self.useWordDocument(word_doc)
summary = self.getField(fieldset, "summary[0]")
if summary:
self.useSummary(summary, False)
def getFragment(self, frag):
stream = frag.getSubIStream()
ministream = guessParser(stream)
if not ministream:
warning("Unable to create the OLE2 mini stream parser!")
return frag
return ministream
def getField(self, fieldset, name):
# _feedAll() is needed to make sure that we get all fragments
# eg. summary[0], summary[1], ..., summary[n]
try:
fieldset._feedAll()
except StopIteration:
pass
if name not in fieldset:
return None
field = fieldset[name]
return self.getFragment(field)
@fault_tolerant
def useSummary(self, summary, is_doc_summary):
if "os" in summary:
self.os = summary["os"].display
if "section[0]" not in summary:
return
summary = summary["section[0]"]
for property in summary.array("property_index"):
self.useProperty(summary, property, is_doc_summary)
@fault_tolerant
def useWordDocument(self, doc):
self.comment = "Encrypted: %s" % doc["FIB/fEncrypted"].value
@fault_tolerant
def useProperty(self, summary, property, is_doc_summary):
field = summary.getFieldByAddress(property["offset"].value*8)
if not field \
or "value" not in field:
return
field = field["value"]
if not field.hasValue():
return
# Get value
value = field.value
if isinstance(value, (str, unicode)):
value = normalizeString(value)
if not value:
return
# Get property identifier
prop_id = property["id"].value
if is_doc_summary:
id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_DOC_SUMMARY
else:
id_to_attr = self.SUMMARY_ID_TO_ATTR
ignore = self.IGNORE_SUMMARY
if prop_id in ignore:
return
# Get Hachoir metadata key
try:
key = id_to_attr[prop_id]
use_prefix = False
except LookupError:
key = "comment"
use_prefix = True
if use_prefix:
prefix = property["id"].display
if (prefix in ("TotalEditingTime", "LastPrinted")) \
and (not field):
# Ignore null time delta
return
value = "%s: %s" % (prefix, value)
else:
if (key == "last_modification") and (not field):
# Ignore null timestamp
return
setattr(self, key, value)
class PcfMetadata(RootMetadata):
PROP_TO_KEY = {
'CHARSET_REGISTRY': 'charset',
'COPYRIGHT': 'copyright',
'WEIGHT_NAME': 'font_weight',
'FOUNDRY': 'author',
'FONT': 'title',
'_XMBDFED_INFO': 'producer',
}
def extract(self, pcf):
if "properties" in pcf:
self.useProperties(pcf["properties"])
def useProperties(self, properties):
last = properties["total_str_length"]
offset0 = last.address + last.size
for index in properties.array("property"):
# Search name and value
value = properties.getFieldByAddress(offset0+index["value_offset"].value*8)
if not value:
continue
value = value.value
if not value:
continue
name = properties.getFieldByAddress(offset0+index["name_offset"].value*8)
if not name:
continue
name = name.value
if name not in self.PROP_TO_KEY:
warning("Skip %s=%r" % (name, value))
continue
key = self.PROP_TO_KEY[name]
setattr(self, key, value)
class SwfMetadata(RootMetadata):
def extract(self, swf):
self.height = swf["rect/ymax"].value # twips
self.width = swf["rect/xmax"].value # twips
self.format_version = "flash version %s" % swf["version"].value
self.frame_rate = swf["frame_rate"].value
self.comment = "Frame count: %s" % swf["frame_count"].value
registerExtractor(TorrentFile, TorrentMetadata)
registerExtractor(TrueTypeFontFile, TTF_Metadata)
registerExtractor(OLE2_File, OLE2_Metadata)
registerExtractor(PcfFile, PcfMetadata)
registerExtractor(SwfFile, SwfMetadata)
| gpl-3.0 |
2014c2g14/cda0519 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/colordict.py | 621 | 24077 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
THECOLORS = {
'gray17' : (43, 43, 43, 255) ,
'gold' : (255, 215, 0, 255) ,
'gray10' : (26, 26, 26, 255) ,
'yellow' : (255, 255, 0, 255) ,
'gray11' : (28, 28, 28, 255) ,
'grey61' : (156, 156, 156, 255) ,
'grey60' : (153, 153, 153, 255) ,
'darkseagreen' : (143, 188, 143, 255) ,
'grey62' : (158, 158, 158, 255) ,
'grey65' : (166, 166, 166, 255) ,
'gray12' : (31, 31, 31, 255) ,
'grey67' : (171, 171, 171, 255) ,
'grey66' : (168, 168, 168, 255) ,
'grey69' : (176, 176, 176, 255) ,
'gray21' : (54, 54, 54, 255) ,
'lightsalmon4' : (139, 87, 66, 255) ,
'lightsalmon2' : (238, 149, 114, 255) ,
'lightsalmon3' : (205, 129, 98, 255) ,
'lightsalmon1' : (255, 160, 122, 255) ,
'gray32' : (82, 82, 82, 255) ,
'green4' : (0, 139, 0, 255) ,
'gray30' : (77, 77, 77, 255) ,
'gray31' : (79, 79, 79, 255) ,
'green1' : (0, 255, 0, 255) ,
'gray37' : (94, 94, 94, 255) ,
'green3' : (0, 205, 0, 255) ,
'green2' : (0, 238, 0, 255) ,
'darkslategray1' : (151, 255, 255, 255) ,
'darkslategray2' : (141, 238, 238, 255) ,
'darkslategray3' : (121, 205, 205, 255) ,
'aquamarine1' : (127, 255, 212, 255) ,
'aquamarine3' : (102, 205, 170, 255) ,
'aquamarine2' : (118, 238, 198, 255) ,
'papayawhip' : (255, 239, 213, 255) ,
'black' : (0, 0, 0, 255) ,
'darkorange3' : (205, 102, 0, 255) ,
'oldlace' : (253, 245, 230, 255) ,
'lightgoldenrod4' : (139, 129, 76, 255) ,
'gray90' : (229, 229, 229, 255) ,
'orchid1' : (255, 131, 250, 255) ,
'orchid2' : (238, 122, 233, 255) ,
'orchid3' : (205, 105, 201, 255) ,
'grey68' : (173, 173, 173, 255) ,
'brown' : (165, 42, 42, 255) ,
'purple2' : (145, 44, 238, 255) ,
'gray80' : (204, 204, 204, 255) ,
'antiquewhite3' : (205, 192, 176, 255) ,
'antiquewhite2' : (238, 223, 204, 255) ,
'antiquewhite1' : (255, 239, 219, 255) ,
'palevioletred3' : (205, 104, 137, 255) ,
'hotpink' : (255, 105, 180, 255) ,
'lightcyan' : (224, 255, 255, 255) ,
'coral3' : (205, 91, 69, 255) ,
'gray8' : (20, 20, 20, 255) ,
'gray9' : (23, 23, 23, 255) ,
'grey32' : (82, 82, 82, 255) ,
'bisque4' : (139, 125, 107, 255) ,
'cyan' : (0, 255, 255, 255) ,
'gray0' : (0, 0, 0, 255) ,
'gray1' : (3, 3, 3, 255) ,
'gray6' : (15, 15, 15, 255) ,
'bisque1' : (255, 228, 196, 255) ,
'bisque2' : (238, 213, 183, 255) ,
'bisque3' : (205, 183, 158, 255) ,
'skyblue' : (135, 206, 235, 255) ,
'gray' : (190, 190, 190, 255) ,
'darkturquoise' : (0, 206, 209, 255) ,
'rosybrown4' : (139, 105, 105, 255) ,
'deepskyblue3' : (0, 154, 205, 255) ,
'grey63' : (161, 161, 161, 255) ,
'indianred1' : (255, 106, 106, 255) ,
'grey78' : (199, 199, 199, 255) ,
'lightpink' : (255, 182, 193, 255) ,
'gray88' : (224, 224, 224, 255) ,
'gray22' : (56, 56, 56, 255) ,
'red' : (255, 0, 0, 255) ,
'grey11' : (28, 28, 28, 255) ,
'lemonchiffon3' : (205, 201, 165, 255) ,
'lemonchiffon2' : (238, 233, 191, 255) ,
'lemonchiffon1' : (255, 250, 205, 255) ,
'indianred3' : (205, 85, 85, 255) ,
'violetred1' : (255, 62, 150, 255) ,
'plum2' : (238, 174, 238, 255) ,
'plum1' : (255, 187, 255, 255) ,
'lemonchiffon4' : (139, 137, 112, 255) ,
'gray99' : (252, 252, 252, 255) ,
'grey13' : (33, 33, 33, 255) ,
'grey55' : (140, 140, 140, 255) ,
'darkcyan' : (0, 139, 139, 255) ,
'chocolate4' : (139, 69, 19, 255) ,
'lightgoldenrodyellow' : (250, 250, 210, 255) ,
'gray54' : (138, 138, 138, 255) ,
'lavender' : (230, 230, 250, 255) ,
'chartreuse3' : (102, 205, 0, 255) ,
'chartreuse2' : (118, 238, 0, 255) ,
'chartreuse1' : (127, 255, 0, 255) ,
'grey48' : (122, 122, 122, 255) ,
'grey16' : (41, 41, 41, 255) ,
'thistle' : (216, 191, 216, 255) ,
'chartreuse4' : (69, 139, 0, 255) ,
'darkorchid4' : (104, 34, 139, 255) ,
'grey42' : (107, 107, 107, 255) ,
'grey41' : (105, 105, 105, 255) ,
'grey17' : (43, 43, 43, 255) ,
'dimgrey' : (105, 105, 105, 255) ,
'dodgerblue4' : (16, 78, 139, 255) ,
'darkorchid2' : (178, 58, 238, 255) ,
'darkorchid3' : (154, 50, 205, 255) ,
'blue' : (0, 0, 255, 255) ,
'rosybrown2' : (238, 180, 180, 255) ,
'honeydew' : (240, 255, 240, 255) ,
'gray18' : (46, 46, 46, 255) ,
'cornflowerblue' : (100, 149, 237, 255) ,
'grey91' : (232, 232, 232, 255) ,
'gray14' : (36, 36, 36, 255) ,
'gray15' : (38, 38, 38, 255) ,
'gray16' : (41, 41, 41, 255) ,
'maroon4' : (139, 28, 98, 255) ,
'maroon3' : (205, 41, 144, 255) ,
'maroon2' : (238, 48, 167, 255) ,
'maroon1' : (255, 52, 179, 255) ,
'gray13' : (33, 33, 33, 255) ,
'gold3' : (205, 173, 0, 255) ,
'gold2' : (238, 201, 0, 255) ,
'gold1' : (255, 215, 0, 255) ,
'grey79' : (201, 201, 201, 255) ,
'palevioletred1' : (255, 130, 171, 255) ,
'palevioletred2' : (238, 121, 159, 255) ,
'gold4' : (139, 117, 0, 255) ,
'gray41' : (105, 105, 105, 255) ,
'gray84' : (214, 214, 214, 255) ,
'mediumpurple' : (147, 112, 219, 255) ,
'rosybrown1' : (255, 193, 193, 255) ,
'lightblue2' : (178, 223, 238, 255) ,
'lightblue3' : (154, 192, 205, 255) ,
'grey57' : (145, 145, 145, 255) ,
'lightblue1' : (191, 239, 255, 255) ,
'lightblue4' : (104, 131, 139, 255) ,
'gray33' : (84, 84, 84, 255) ,
'skyblue4' : (74, 112, 139, 255) ,
'grey97' : (247, 247, 247, 255) ,
'skyblue1' : (135, 206, 255, 255) ,
'gray27' : (69, 69, 69, 255) ,
'skyblue3' : (108, 166, 205, 255) ,
'skyblue2' : (126, 192, 238, 255) ,
'lavenderblush1' : (255, 240, 245, 255) ,
'darkgrey' : (169, 169, 169, 255) ,
'lavenderblush3' : (205, 193, 197, 255) ,
'darkslategrey' : (47, 79, 79, 255) ,
'lavenderblush4' : (139, 131, 134, 255) ,
'deeppink4' : (139, 10, 80, 255) ,
'grey99' : (252, 252, 252, 255) ,
'gray36' : (92, 92, 92, 255) ,
'coral4' : (139, 62, 47, 255) ,
'magenta3' : (205, 0, 205, 255) ,
'lightskyblue4' : (96, 123, 139, 255) ,
'mediumturquoise' : (72, 209, 204, 255) ,
'gray34' : (87, 87, 87, 255) ,
'floralwhite' : (255, 250, 240, 255) ,
'grey39' : (99, 99, 99, 255) ,
'grey36' : (92, 92, 92, 255) ,
'grey37' : (94, 94, 94, 255) ,
'grey34' : (87, 87, 87, 255) ,
'gray26' : (66, 66, 66, 255) ,
'royalblue2' : (67, 110, 238, 255) ,
'grey33' : (84, 84, 84, 255) ,
'turquoise1' : (0, 245, 255, 255) ,
'grey31' : (79, 79, 79, 255) ,
'steelblue1' : (99, 184, 255, 255) ,
'sienna4' : (139, 71, 38, 255) ,
'steelblue3' : (79, 148, 205, 255) ,
'lavenderblush2' : (238, 224, 229, 255) ,
'sienna1' : (255, 130, 71, 255) ,
'steelblue4' : (54, 100, 139, 255) ,
'sienna3' : (205, 104, 57, 255) ,
'aquamarine4' : (69, 139, 116, 255) ,
'lightyellow1' : (255, 255, 224, 255) ,
'lightyellow2' : (238, 238, 209, 255) ,
'lightsteelblue' : (176, 196, 222, 255) ,
'lightyellow4' : (139, 139, 122, 255) ,
'magenta2' : (238, 0, 238, 255) ,
'lightskyblue1' : (176, 226, 255, 255) ,
'lightgoldenrod' : (238, 221, 130, 255) ,
'magenta4' : (139, 0, 139, 255) ,
'gray87' : (222, 222, 222, 255) ,
'greenyellow' : (173, 255, 47, 255) ,
'navajowhite4' : (139, 121, 94, 255) ,
'darkslategray4' : (82, 139, 139, 255) ,
'olivedrab' : (107, 142, 35, 255) ,
'navajowhite1' : (255, 222, 173, 255) ,
'navajowhite2' : (238, 207, 161, 255) ,
'darkgoldenrod1' : (255, 185, 15, 255) ,
'sienna' : (160, 82, 45, 255) ,
'blue1' : (0, 0, 255, 255) ,
'yellow1' : (255, 255, 0, 255) ,
'gray61' : (156, 156, 156, 255) ,
'magenta1' : (255, 0, 255, 255) ,
'grey52' : (133, 133, 133, 255) ,
'orangered4' : (139, 37, 0, 255) ,
'palegreen' : (152, 251, 152, 255) ,
'gray86' : (219, 219, 219, 255) ,
'grey80' : (204, 204, 204, 255) ,
'seashell' : (255, 245, 238, 255) ,
'royalblue' : (65, 105, 225, 255) ,
'firebrick3' : (205, 38, 38, 255) ,
'blue4' : (0, 0, 139, 255) ,
'peru' : (205, 133, 63, 255) ,
'gray60' : (153, 153, 153, 255) ,
'aquamarine' : (127, 255, 212, 255) ,
'grey53' : (135, 135, 135, 255) ,
'tan4' : (139, 90, 43, 255) ,
'darkgoldenrod' : (184, 134, 11, 255) ,
'tan2' : (238, 154, 73, 255) ,
'tan1' : (255, 165, 79, 255) ,
'darkslategray' : (47, 79, 79, 255) ,
'royalblue3' : (58, 95, 205, 255) ,
'red2' : (238, 0, 0, 255) ,
'red1' : (255, 0, 0, 255) ,
'dodgerblue' : (30, 144, 255, 255) ,
'violetred4' : (139, 34, 82, 255) ,
'lightyellow' : (255, 255, 224, 255) ,
'paleturquoise1' : (187, 255, 255, 255) ,
'firebrick2' : (238, 44, 44, 255) ,
'mediumaquamarine' : (102, 205, 170, 255) ,
'lemonchiffon' : (255, 250, 205, 255) ,
'chocolate' : (210, 105, 30, 255) ,
'orchid4' : (139, 71, 137, 255) ,
'maroon' : (176, 48, 96, 255) ,
'gray38' : (97, 97, 97, 255) ,
'darkorange4' : (139, 69, 0, 255) ,
'mintcream' : (245, 255, 250, 255) ,
'darkorange1' : (255, 127, 0, 255) ,
'antiquewhite' : (250, 235, 215, 255) ,
'darkorange2' : (238, 118, 0, 255) ,
'grey18' : (46, 46, 46, 255) ,
'grey19' : (48, 48, 48, 255) ,
'grey38' : (97, 97, 97, 255) ,
'moccasin' : (255, 228, 181, 255) ,
'grey10' : (26, 26, 26, 255) ,
'chocolate1' : (255, 127, 36, 255) ,
'chocolate2' : (238, 118, 33, 255) ,
'chocolate3' : (205, 102, 29, 255) ,
'saddlebrown' : (139, 69, 19, 255) ,
'grey15' : (38, 38, 38, 255) ,
'darkslateblue' : (72, 61, 139, 255) ,
'lightskyblue' : (135, 206, 250, 255) ,
'gray69' : (176, 176, 176, 255) ,
'gray68' : (173, 173, 173, 255) ,
'deeppink' : (255, 20, 147, 255) ,
'gray65' : (166, 166, 166, 255) ,
'gray64' : (163, 163, 163, 255) ,
'gray67' : (171, 171, 171, 255) ,
'gray66' : (168, 168, 168, 255) ,
'gray25' : (64, 64, 64, 255) ,
'coral' : (255, 127, 80, 255) ,
'gray63' : (161, 161, 161, 255) ,
'gray62' : (158, 158, 158, 255) ,
'goldenrod4' : (139, 105, 20, 255) ,
'grey35' : (89, 89, 89, 255) ,
'gray89' : (227, 227, 227, 255) ,
'goldenrod1' : (255, 193, 37, 255) ,
'goldenrod2' : (238, 180, 34, 255) ,
'goldenrod3' : (205, 155, 29, 255) ,
'springgreen1' : (0, 255, 127, 255) ,
'springgreen2' : (0, 238, 118, 255) ,
'springgreen3' : (0, 205, 102, 255) ,
'springgreen4' : (0, 139, 69, 255) ,
'mistyrose1' : (255, 228, 225, 255) ,
'sandybrown' : (244, 164, 96, 255) ,
'grey30' : (77, 77, 77, 255) ,
'seashell2' : (238, 229, 222, 255) ,
'seashell3' : (205, 197, 191, 255) ,
'tan' : (210, 180, 140, 255) ,
'seashell1' : (255, 245, 238, 255) ,
'mistyrose3' : (205, 183, 181, 255) ,
'magenta' : (255, 0, 255, 255) ,
'pink' : (255, 192, 203, 255) ,
'ivory2' : (238, 238, 224, 255) ,
'ivory1' : (255, 255, 240, 255) ,
'lightcyan2' : (209, 238, 238, 255) ,
'mediumseagreen' : (60, 179, 113, 255) ,
'ivory4' : (139, 139, 131, 255) ,
'darkorange' : (255, 140, 0, 255) ,
'powderblue' : (176, 224, 230, 255) ,
'dodgerblue1' : (30, 144, 255, 255) ,
'gray95' : (242, 242, 242, 255) ,
'firebrick1' : (255, 48, 48, 255) ,
'gray7' : (18, 18, 18, 255) ,
'mistyrose4' : (139, 125, 123, 255) ,
'tomato' : (255, 99, 71, 255) ,
'indianred2' : (238, 99, 99, 255) ,
'steelblue2' : (92, 172, 238, 255) ,
'gray100' : (255, 255, 255, 255) ,
'seashell4' : (139, 134, 130, 255) ,
'grey89' : (227, 227, 227, 255) ,
'grey88' : (224, 224, 224, 255) ,
'grey87' : (222, 222, 222, 255) ,
'grey86' : (219, 219, 219, 255) ,
'grey85' : (217, 217, 217, 255) ,
'grey84' : (214, 214, 214, 255) ,
'midnightblue' : (25, 25, 112, 255) ,
'grey82' : (209, 209, 209, 255) ,
'grey81' : (207, 207, 207, 255) ,
'yellow3' : (205, 205, 0, 255) ,
'ivory3' : (205, 205, 193, 255) ,
'grey22' : (56, 56, 56, 255) ,
'gray85' : (217, 217, 217, 255) ,
'violetred3' : (205, 50, 120, 255) ,
'dodgerblue2' : (28, 134, 238, 255) ,
'gray42' : (107, 107, 107, 255) ,
'sienna2' : (238, 121, 66, 255) ,
'grey72' : (184, 184, 184, 255) ,
'grey73' : (186, 186, 186, 255) ,
'grey70' : (179, 179, 179, 255) ,
'palevioletred' : (219, 112, 147, 255) ,
'lightslategray' : (119, 136, 153, 255) ,
'grey77' : (196, 196, 196, 255) ,
'grey74' : (189, 189, 189, 255) ,
'slategray1' : (198, 226, 255, 255) ,
'pink1' : (255, 181, 197, 255) ,
'mediumpurple1' : (171, 130, 255, 255) ,
'pink3' : (205, 145, 158, 255) ,
'antiquewhite4' : (139, 131, 120, 255) ,
'lightpink1' : (255, 174, 185, 255) ,
'honeydew2' : (224, 238, 224, 255) ,
'khaki4' : (139, 134, 78, 255) ,
'darkolivegreen4' : (110, 139, 61, 255) ,
'gray45' : (115, 115, 115, 255) ,
'slategray3' : (159, 182, 205, 255) ,
'darkolivegreen1' : (202, 255, 112, 255) ,
'khaki1' : (255, 246, 143, 255) ,
'khaki2' : (238, 230, 133, 255) ,
'khaki3' : (205, 198, 115, 255) ,
'lavenderblush' : (255, 240, 245, 255) ,
'honeydew4' : (131, 139, 131, 255) ,
'salmon3' : (205, 112, 84, 255) ,
'salmon2' : (238, 130, 98, 255) ,
'gray92' : (235, 235, 235, 255) ,
'salmon4' : (139, 76, 57, 255) ,
'gray49' : (125, 125, 125, 255) ,
'gray48' : (122, 122, 122, 255) ,
'linen' : (250, 240, 230, 255) ,
'burlywood1' : (255, 211, 155, 255) ,
'green' : (0, 255, 0, 255) ,
'gray47' : (120, 120, 120, 255) ,
'blueviolet' : (138, 43, 226, 255) ,
'brown2' : (238, 59, 59, 255) ,
'brown3' : (205, 51, 51, 255) ,
'peachpuff' : (255, 218, 185, 255) ,
'brown4' : (139, 35, 35, 255) ,
'firebrick4' : (139, 26, 26, 255) ,
'azure1' : (240, 255, 255, 255) ,
'azure3' : (193, 205, 205, 255) ,
'azure2' : (224, 238, 238, 255) ,
'azure4' : (131, 139, 139, 255) ,
'tomato4' : (139, 54, 38, 255) ,
'orange4' : (139, 90, 0, 255) ,
'firebrick' : (178, 34, 34, 255) ,
'indianred' : (205, 92, 92, 255) ,
'orange1' : (255, 165, 0, 255) ,
'orange3' : (205, 133, 0, 255) ,
'orange2' : (238, 154, 0, 255) ,
'darkolivegreen' : (85, 107, 47, 255) ,
'gray2' : (5, 5, 5, 255) ,
'slategrey' : (112, 128, 144, 255) ,
'gray81' : (207, 207, 207, 255) ,
'darkred' : (139, 0, 0, 255) ,
'gray3' : (8, 8, 8, 255) ,
'lightsteelblue1' : (202, 225, 255, 255) ,
'lightsteelblue2' : (188, 210, 238, 255) ,
'lightsteelblue3' : (162, 181, 205, 255) ,
'lightsteelblue4' : (110, 123, 139, 255) ,
'tomato3' : (205, 79, 57, 255) ,
'gray43' : (110, 110, 110, 255) ,
'darkgoldenrod4' : (139, 101, 8, 255) ,
'grey50' : (127, 127, 127, 255) ,
'yellow4' : (139, 139, 0, 255) ,
'mediumorchid' : (186, 85, 211, 255) ,
'yellow2' : (238, 238, 0, 255) ,
'darkgoldenrod2' : (238, 173, 14, 255) ,
'darkgoldenrod3' : (205, 149, 12, 255) ,
'chartreuse' : (127, 255, 0, 255) ,
'mediumblue' : (0, 0, 205, 255) ,
'gray4' : (10, 10, 10, 255) ,
'springgreen' : (0, 255, 127, 255) ,
'orange' : (255, 165, 0, 255) ,
'gray5' : (13, 13, 13, 255) ,
'lightsalmon' : (255, 160, 122, 255) ,
'gray19' : (48, 48, 48, 255) ,
'turquoise' : (64, 224, 208, 255) ,
'lightseagreen' : (32, 178, 170, 255) ,
'grey8' : (20, 20, 20, 255) ,
'grey9' : (23, 23, 23, 255) ,
'grey6' : (15, 15, 15, 255) ,
'grey7' : (18, 18, 18, 255) ,
'grey4' : (10, 10, 10, 255) ,
'grey5' : (13, 13, 13, 255) ,
'grey2' : (5, 5, 5, 255) ,
'grey3' : (8, 8, 8, 255) ,
'grey0' : (0, 0, 0, 255) ,
'grey1' : (3, 3, 3, 255) ,
'gray50' : (127, 127, 127, 255) ,
'goldenrod' : (218, 165, 32, 255) ,
'grey58' : (148, 148, 148, 255) ,
'grey59' : (150, 150, 150, 255) ,
'gray51' : (130, 130, 130, 255) ,
'grey54' : (138, 138, 138, 255) ,
'mediumorchid4' : (122, 55, 139, 255) ,
'grey56' : (143, 143, 143, 255) ,
'navajowhite3' : (205, 179, 139, 255) ,
'mediumorchid1' : (224, 102, 255, 255) ,
'grey51' : (130, 130, 130, 255) ,
'mediumorchid3' : (180, 82, 205, 255) ,
'mediumorchid2' : (209, 95, 238, 255) ,
'cyan2' : (0, 238, 238, 255) ,
'cyan3' : (0, 205, 205, 255) ,
'gray23' : (59, 59, 59, 255) ,
'cyan1' : (0, 255, 255, 255) ,
'darkgreen' : (0, 100, 0, 255) ,
'gray24' : (61, 61, 61, 255) ,
'cyan4' : (0, 139, 139, 255) ,
'darkviolet' : (148, 0, 211, 255) ,
'peachpuff4' : (139, 119, 101, 255) ,
'gray28' : (71, 71, 71, 255) ,
'slateblue4' : (71, 60, 139, 255) ,
'slateblue3' : (105, 89, 205, 255) ,
'peachpuff1' : (255, 218, 185, 255) ,
'peachpuff2' : (238, 203, 173, 255) ,
'peachpuff3' : (205, 175, 149, 255) ,
'gray29' : (74, 74, 74, 255) ,
'paleturquoise' : (175, 238, 238, 255) ,
'darkgray' : (169, 169, 169, 255) ,
'grey25' : (64, 64, 64, 255) ,
'darkmagenta' : (139, 0, 139, 255) ,
'palegoldenrod' : (238, 232, 170, 255) ,
'grey64' : (163, 163, 163, 255) ,
'grey12' : (31, 31, 31, 255) ,
'deeppink3' : (205, 16, 118, 255) ,
'gray79' : (201, 201, 201, 255) ,
'gray83' : (212, 212, 212, 255) ,
'deeppink2' : (238, 18, 137, 255) ,
'burlywood4' : (139, 115, 85, 255) ,
'palevioletred4' : (139, 71, 93, 255) ,
'deeppink1' : (255, 20, 147, 255) ,
'slateblue2' : (122, 103, 238, 255) ,
'grey46' : (117, 117, 117, 255) ,
'royalblue4' : (39, 64, 139, 255) ,
'yellowgreen' : (154, 205, 50, 255) ,
'royalblue1' : (72, 118, 255, 255) ,
'slateblue1' : (131, 111, 255, 255) ,
'lightgoldenrod3' : (205, 190, 112, 255) ,
'lightgoldenrod2' : (238, 220, 130, 255) ,
'navy' : (0, 0, 128, 255) ,
'orchid' : (218, 112, 214, 255) ,
'ghostwhite' : (248, 248, 255, 255) ,
'purple' : (160, 32, 240, 255) ,
'darkkhaki' : (189, 183, 107, 255) ,
'grey45' : (115, 115, 115, 255) ,
'gray94' : (240, 240, 240, 255) ,
'wheat4' : (139, 126, 102, 255) ,
'gray96' : (245, 245, 245, 255) ,
'gray97' : (247, 247, 247, 255) ,
'wheat1' : (255, 231, 186, 255) ,
'gray91' : (232, 232, 232, 255) ,
'wheat3' : (205, 186, 150, 255) ,
'wheat2' : (238, 216, 174, 255) ,
'indianred4' : (139, 58, 58, 255) ,
'coral2' : (238, 106, 80, 255) ,
'coral1' : (255, 114, 86, 255) ,
'violetred' : (208, 32, 144, 255) ,
'rosybrown3' : (205, 155, 155, 255) ,
'deepskyblue2' : (0, 178, 238, 255) ,
'deepskyblue1' : (0, 191, 255, 255) ,
'bisque' : (255, 228, 196, 255) ,
'grey49' : (125, 125, 125, 255) ,
'khaki' : (240, 230, 140, 255) ,
'wheat' : (245, 222, 179, 255) ,
'lightslateblue' : (132, 112, 255, 255) ,
'mediumpurple3' : (137, 104, 205, 255) ,
'gray55' : (140, 140, 140, 255) ,
'deepskyblue' : (0, 191, 255, 255) ,
'gray98' : (250, 250, 250, 255) ,
'steelblue' : (70, 130, 180, 255) ,
'aliceblue' : (240, 248, 255, 255) ,
'lightskyblue2' : (164, 211, 238, 255) ,
'lightskyblue3' : (141, 182, 205, 255) ,
'lightslategrey' : (119, 136, 153, 255) ,
'blue3' : (0, 0, 205, 255) ,
'blue2' : (0, 0, 238, 255) ,
'gainsboro' : (220, 220, 220, 255) ,
'grey76' : (194, 194, 194, 255) ,
'purple3' : (125, 38, 205, 255) ,
'plum4' : (139, 102, 139, 255) ,
'gray56' : (143, 143, 143, 255) ,
'plum3' : (205, 150, 205, 255) ,
'plum' : (221, 160, 221, 255) ,
'lightgrey' : (211, 211, 211, 255) ,
'mediumslateblue' : (123, 104, 238, 255) ,
'mistyrose' : (255, 228, 225, 255) ,
'lightcyan1' : (224, 255, 255, 255) ,
'grey71' : (181, 181, 181, 255) ,
'darksalmon' : (233, 150, 122, 255) ,
'beige' : (245, 245, 220, 255) ,
'grey24' : (61, 61, 61, 255) ,
'azure' : (240, 255, 255, 255) ,
'honeydew1' : (240, 255, 240, 255) ,
'slategray2' : (185, 211, 238, 255) ,
'dodgerblue3' : (24, 116, 205, 255) ,
'slategray4' : (108, 123, 139, 255) ,
'grey27' : (69, 69, 69, 255) ,
'lightcyan3' : (180, 205, 205, 255) ,
'cornsilk' : (255, 248, 220, 255) ,
'tomato1' : (255, 99, 71, 255) ,
'gray57' : (145, 145, 145, 255) ,
'mediumvioletred' : (199, 21, 133, 255) ,
'tomato2' : (238, 92, 66, 255) ,
'snow4' : (139, 137, 137, 255) ,
'grey75' : (191, 191, 191, 255) ,
'snow2' : (238, 233, 233, 255) ,
'snow3' : (205, 201, 201, 255) ,
'snow1' : (255, 250, 250, 255) ,
'grey23' : (59, 59, 59, 255) ,
'cornsilk3' : (205, 200, 177, 255) ,
'lightcoral' : (240, 128, 128, 255) ,
'orangered' : (255, 69, 0, 255) ,
'navajowhite' : (255, 222, 173, 255) ,
'mediumpurple2' : (159, 121, 238, 255) ,
'slategray' : (112, 128, 144, 255) ,
'pink2' : (238, 169, 184, 255) ,
'grey29' : (74, 74, 74, 255) ,
'grey28' : (71, 71, 71, 255) ,
'gray82' : (209, 209, 209, 255) ,
'burlywood' : (222, 184, 135, 255) ,
'mediumpurple4' : (93, 71, 139, 255) ,
'mediumspringgreen' : (0, 250, 154, 255) ,
'grey26' : (66, 66, 66, 255) ,
'grey21' : (54, 54, 54, 255) ,
'grey20' : (51, 51, 51, 255) ,
'blanchedalmond' : (255, 235, 205, 255) ,
'pink4' : (139, 99, 108, 255) ,
'gray78' : (199, 199, 199, 255) ,
'tan3' : (205, 133, 63, 255) ,
'gray76' : (194, 194, 194, 255) ,
'gray77' : (196, 196, 196, 255) ,
'white' : (255, 255, 255, 255) ,
'gray75' : (191, 191, 191, 255) ,
'gray72' : (184, 184, 184, 255) ,
'gray73' : (186, 186, 186, 255) ,
'gray70' : (179, 179, 179, 255) ,
'gray71' : (181, 181, 181, 255) ,
'lightgray' : (211, 211, 211, 255) ,
'ivory' : (255, 255, 240, 255) ,
'gray46' : (117, 117, 117, 255) ,
'gray74' : (189, 189, 189, 255) ,
'lightyellow3' : (205, 205, 180, 255) ,
'lightpink2' : (238, 162, 173, 255) ,
'lightpink3' : (205, 140, 149, 255) ,
'paleturquoise4' : (102, 139, 139, 255) ,
'lightpink4' : (139, 95, 101, 255) ,
'paleturquoise3' : (150, 205, 205, 255) ,
'seagreen4' : (46, 139, 87, 255) ,
'seagreen3' : (67, 205, 128, 255) ,
'seagreen2' : (78, 238, 148, 255) ,
'seagreen1' : (84, 255, 159, 255) ,
'paleturquoise2' : (174, 238, 238, 255) ,
'gray52' : (133, 133, 133, 255) ,
'cornsilk4' : (139, 136, 120, 255) ,
'cornsilk2' : (238, 232, 205, 255) ,
'darkolivegreen3' : (162, 205, 90, 255) ,
'cornsilk1' : (255, 248, 220, 255) ,
'limegreen' : (50, 205, 50, 255) ,
'darkolivegreen2' : (188, 238, 104, 255) ,
'grey' : (190, 190, 190, 255) ,
'violetred2' : (238, 58, 140, 255) ,
'salmon1' : (255, 140, 105, 255) ,
'grey92' : (235, 235, 235, 255) ,
'grey93' : (237, 237, 237, 255) ,
'grey94' : (240, 240, 240, 255) ,
'grey95' : (242, 242, 242, 255) ,
'grey96' : (245, 245, 245, 255) ,
'grey83' : (212, 212, 212, 255) ,
'grey98' : (250, 250, 250, 255) ,
'lightgoldenrod1' : (255, 236, 139, 255) ,
'palegreen1' : (154, 255, 154, 255) ,
'red3' : (205, 0, 0, 255) ,
'palegreen3' : (124, 205, 124, 255) ,
'palegreen2' : (144, 238, 144, 255) ,
'palegreen4' : (84, 139, 84, 255) ,
'cadetblue' : (95, 158, 160, 255) ,
'violet' : (238, 130, 238, 255) ,
'mistyrose2' : (238, 213, 210, 255) ,
'slateblue' : (106, 90, 205, 255) ,
'grey43' : (110, 110, 110, 255) ,
'grey90' : (229, 229, 229, 255) ,
'gray35' : (89, 89, 89, 255) ,
'turquoise3' : (0, 197, 205, 255) ,
'turquoise2' : (0, 229, 238, 255) ,
'burlywood3' : (205, 170, 125, 255) ,
'burlywood2' : (238, 197, 145, 255) ,
'lightcyan4' : (122, 139, 139, 255) ,
'rosybrown' : (188, 143, 143, 255) ,
'turquoise4' : (0, 134, 139, 255) ,
'whitesmoke' : (245, 245, 245, 255) ,
'lightblue' : (173, 216, 230, 255) ,
'grey40' : (102, 102, 102, 255) ,
'gray40' : (102, 102, 102, 255) ,
'honeydew3' : (193, 205, 193, 255) ,
'dimgray' : (105, 105, 105, 255) ,
'grey47' : (120, 120, 120, 255) ,
'seagreen' : (46, 139, 87, 255) ,
'red4' : (139, 0, 0, 255) ,
'grey14' : (36, 36, 36, 255) ,
'snow' : (255, 250, 250, 255) ,
'darkorchid1' : (191, 62, 255, 255) ,
'gray58' : (148, 148, 148, 255) ,
'gray59' : (150, 150, 150, 255) ,
'cadetblue4' : (83, 134, 139, 255) ,
'cadetblue3' : (122, 197, 205, 255) ,
'cadetblue2' : (142, 229, 238, 255) ,
'cadetblue1' : (152, 245, 255, 255) ,
'olivedrab4' : (105, 139, 34, 255) ,
'purple4' : (85, 26, 139, 255) ,
'gray20' : (51, 51, 51, 255) ,
'grey44' : (112, 112, 112, 255) ,
'purple1' : (155, 48, 255, 255) ,
'olivedrab1' : (192, 255, 62, 255) ,
'olivedrab2' : (179, 238, 58, 255) ,
'olivedrab3' : (154, 205, 50, 255) ,
'orangered3' : (205, 55, 0, 255) ,
'orangered2' : (238, 64, 0, 255) ,
'orangered1' : (255, 69, 0, 255) ,
'darkorchid' : (153, 50, 204, 255) ,
'thistle3' : (205, 181, 205, 255) ,
'thistle2' : (238, 210, 238, 255) ,
'thistle1' : (255, 225, 255, 255) ,
'salmon' : (250, 128, 114, 255) ,
'gray93' : (237, 237, 237, 255) ,
'thistle4' : (139, 123, 139, 255) ,
'gray39' : (99, 99, 99, 255) ,
'lawngreen' : (124, 252, 0, 255) ,
'hotpink3' : (205, 96, 144, 255) ,
'hotpink2' : (238, 106, 167, 255) ,
'hotpink1' : (255, 110, 180, 255) ,
'lightgreen' : (144, 238, 144, 255) ,
'hotpink4' : (139, 58, 98, 255) ,
'darkseagreen4' : (105, 139, 105, 255) ,
'darkseagreen3' : (155, 205, 155, 255) ,
'darkseagreen2' : (180, 238, 180, 255) ,
'darkseagreen1' : (193, 255, 193, 255) ,
'deepskyblue4' : (0, 104, 139, 255) ,
'gray44' : (112, 112, 112, 255) ,
'navyblue' : (0, 0, 128, 255) ,
'darkblue' : (0, 0, 139, 255) ,
'forestgreen' : (34, 139, 34, 255) ,
'gray53' : (135, 135, 135, 255) ,
'grey100' : (255, 255, 255, 255) ,
'brown1' : (255, 64, 64, 255) ,
}
| agpl-3.0 |
mcus/SickRage | lib/babelfish/language.py | 79 | 6874 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from collections import namedtuple
from functools import partial
from pkg_resources import resource_stream # @UnresolvedImport
from .converters import ConverterManager
from .country import Country
from .exceptions import LanguageConvertError
from .script import Script
from . import basestr
LANGUAGES = set()
LANGUAGE_MATRIX = []
#: The namedtuple used in the :data:`LANGUAGE_MATRIX`
IsoLanguage = namedtuple('IsoLanguage', ['alpha3', 'alpha3b', 'alpha3t', 'alpha2', 'scope', 'type', 'name', 'comment'])
f = resource_stream('babelfish', 'data/iso-639-3.tab')
f.readline()
for l in f:
iso_language = IsoLanguage(*l.decode('utf-8').split('\t'))
LANGUAGES.add(iso_language.alpha3)
LANGUAGE_MATRIX.append(iso_language)
f.close()
class LanguageConverterManager(ConverterManager):
""":class:`~babelfish.converters.ConverterManager` for language converters"""
entry_point = 'babelfish.language_converters'
internal_converters = ['alpha2 = babelfish.converters.alpha2:Alpha2Converter',
'alpha3b = babelfish.converters.alpha3b:Alpha3BConverter',
'alpha3t = babelfish.converters.alpha3t:Alpha3TConverter',
'name = babelfish.converters.name:NameConverter',
'scope = babelfish.converters.scope:ScopeConverter',
'type = babelfish.converters.type:LanguageTypeConverter',
'opensubtitles = babelfish.converters.opensubtitles:OpenSubtitlesConverter']
language_converters = LanguageConverterManager()
class LanguageMeta(type):
"""The :class:`Language` metaclass
Dynamically redirect :meth:`Language.frommycode` to :meth:`Language.fromcode` with the ``mycode`` `converter`
"""
def __getattr__(cls, name):
if name.startswith('from'):
return partial(cls.fromcode, converter=name[4:])
return type.__getattribute__(cls, name)
class Language(LanguageMeta(str('LanguageBase'), (object,), {})):
"""A human language
A human language is composed of a language part following the ISO-639
standard and can be country-specific when a :class:`~babelfish.country.Country`
is specified.
The :class:`Language` is extensible with custom converters (see :ref:`custom_converters`)
:param string language: the language as a 3-letter ISO-639-3 code
:param country: the country (if any) as a 2-letter ISO-3166 code or :class:`~babelfish.country.Country` instance
:type country: string or :class:`~babelfish.country.Country` or None
:param script: the script (if any) as a 4-letter ISO-15924 code or :class:`~babelfish.script.Script` instance
:type script: string or :class:`~babelfish.script.Script` or None
:param unknown: the unknown language as a three-letters ISO-639-3 code to use as fallback
:type unknown: string or None
:raise: ValueError if the language could not be recognized and `unknown` is ``None``
"""
def __init__(self, language, country=None, script=None, unknown=None):
if unknown is not None and language not in LANGUAGES:
language = unknown
if language not in LANGUAGES:
raise ValueError('%r is not a valid language' % language)
self.alpha3 = language
self.country = None
if isinstance(country, Country):
self.country = country
elif country is None:
self.country = None
else:
self.country = Country(country)
self.script = None
if isinstance(script, Script):
self.script = script
elif script is None:
self.script = None
else:
self.script = Script(script)
@classmethod
def fromcode(cls, code, converter):
"""Create a :class:`Language` by its `code` using `converter` to
:meth:`~babelfish.converters.LanguageReverseConverter.reverse` it
:param string code: the code to reverse
:param string converter: name of the :class:`~babelfish.converters.LanguageReverseConverter` to use
:return: the corresponding :class:`Language` instance
:rtype: :class:`Language`
"""
return cls(*language_converters[converter].reverse(code))
@classmethod
def fromietf(cls, ietf):
"""Create a :class:`Language` by from an IETF language code
:param string ietf: the ietf code
:return: the corresponding :class:`Language` instance
:rtype: :class:`Language`
"""
subtags = ietf.split('-')
language_subtag = subtags.pop(0).lower()
if len(language_subtag) == 2:
language = cls.fromalpha2(language_subtag)
else:
language = cls(language_subtag)
while subtags:
subtag = subtags.pop(0)
if len(subtag) == 2:
language.country = Country(subtag.upper())
else:
language.script = Script(subtag.capitalize())
if language.script is not None:
if subtags:
raise ValueError('Wrong IETF format. Unmatched subtags: %r' % subtags)
break
return language
def __getstate__(self):
return self.alpha3, self.country, self.script
def __setstate__(self, state):
self.alpha3, self.country, self.script = state
def __getattr__(self, name):
alpha3 = self.alpha3
country = self.country.alpha2 if self.country is not None else None
script = self.script.code if self.script is not None else None
try:
return language_converters[name].convert(alpha3, country, script)
except KeyError:
raise AttributeError(name)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, basestr):
return str(self) == other
if not isinstance(other, Language):
return False
return (self.alpha3 == other.alpha3 and
self.country == other.country and
self.script == other.script)
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.alpha3 != 'und'
__nonzero__ = __bool__
def __repr__(self):
return '<Language [%s]>' % self
def __str__(self):
try:
s = self.alpha2
except LanguageConvertError:
s = self.alpha3
if self.country is not None:
s += '-' + str(self.country)
if self.script is not None:
s += '-' + str(self.script)
return s
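# --- Editor's illustrative sketch; not part of the original babelfish module ---
# A minimal, hedged example of how the Language class above is typically used.
# The specific codes are assumptions based on ISO-639-3 / IETF BCP 47 conventions;
# the converter names ('alpha2', ...) come from LanguageConverterManager above.
if __name__ == '__main__':
    print(Language('eng'))               # prints 'en' via the alpha2 converter in __str__
    print(Language.fromalpha2('fr'))     # dynamic from<converter> dispatch via LanguageMeta
    print(Language.fromietf('sr-Cyrl'))  # IETF tag: language subtag plus script subtag
    print(bool(Language('und')))         # False: the undetermined language is falsy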
| gpl-3.0 |
saleemjaveds/https-github.com-openstack-nova | nova/openstack/common/fixture/config.py | 69 | 3062 | #
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
"""Allows overriding configuration settings for the test.
`conf` will be reset on cleanup.
"""
def __init__(self, conf=cfg.CONF):
self.conf = conf
def setUp(self):
super(Config, self).setUp()
        # NOTE(morganfainberg): unregister must be added to cleanup before
        # reset because cleanups run in reverse order of registration, and a
        # reset must occur before options can be unregistered.
self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
self._registered_config_opts = {}
def config(self, **kw):
"""Override configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a `group` argument is supplied, the overrides are applied to
the specified configuration option group, otherwise the overrides
are applied to the ``default`` group.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
def _unregister_config_opts(self):
for group in self._registered_config_opts:
self.conf.unregister_opts(self._registered_config_opts[group],
group=group)
def register_opt(self, opt, group=None):
"""Register a single option for the test run.
Options registered in this manner will automatically be unregistered
during cleanup.
If a `group` argument is supplied, it will register the new option
to that group, otherwise the option is registered to the ``default``
group.
"""
self.conf.register_opt(opt, group=group)
self._registered_config_opts.setdefault(group, set()).add(opt)
def register_opts(self, opts, group=None):
"""Register multiple options for the test run.
This works in the same manner as register_opt() but takes a list of
options as the first argument. All arguments will be registered to the
same group if the ``group`` argument is supplied, otherwise all options
will be registered to the ``default`` group.
"""
for opt in opts:
self.register_opt(opt, group=group)
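# --- Editor's illustrative sketch; not part of the original fixture module ---
# A hedged, stand-alone demonstration of the Config fixture above.  The option
# name 'demo_opt' is invented purely for illustration; in real tests the fixture
# is normally activated with self.useFixture(Config()) inside a test case.
if __name__ == '__main__':
    fixture = Config()
    fixture.setUp()
    fixture.register_opt(cfg.StrOpt('demo_opt', default='a'))
    fixture.config(demo_opt='b')
    print(fixture.conf.demo_opt)   # 'b'
    fixture.cleanUp()              # resets the override and unregisters the option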
| apache-2.0 |
biota/sourcetracker2 | sourcetracker/_q2/tests/test_method.py | 1 | 8986 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
# www.biota.com
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import unittest
import tempfile
import pandas as pd
from biom import Table, load_table
from qiime2 import Artifact
from qiime2 import Metadata
from numpy.testing import assert_allclose
from qiime2.plugins.sourcetracker2.actions import gibbs
from sourcetracker._q2._visualizer import (barplot,
assignment_barplot)
class Test_QIIME2_gibbs(unittest.TestCase):
def setUp(self):
        # different CLI parameter sets to test
# all used in the example section
self.examples = {'example1': {'mapping': 'map.txt',
'restarts': 2,
'draws_per_restart': 3,
'burnin': 10,
'delay': 2,
'loo': False,
'source_sink_column': 'SourceSink',
'source_column_value': 'source',
'sink_column_value': 'sink',
'source_category_column': 'Env',
'sink_rarefaction_depth': 1000,
'source_rarefaction_depth': 1000},
'example2': {'mapping': 'alt-map.txt',
'restarts': 2,
'draws_per_restart': 3,
'burnin': 10,
'delay': 2,
'loo': False,
'source_sink_column': 'source-or-sink',
'source_column_value': 'src',
'sink_column_value': 'snk',
'source_category_column': 'sample-type',
'sink_rarefaction_depth': 1000,
'source_rarefaction_depth': 1000},
'example3': {'mapping': 'map.txt',
'restarts': 2,
'draws_per_restart': 3,
'burnin': 10,
'delay': 2,
'loo': True,
'source_sink_column': 'SourceSink',
'source_column_value': 'source',
'sink_column_value': 'sink',
'source_category_column': 'Env',
'sink_rarefaction_depth': 1000,
'source_rarefaction_depth': 1000},
'example4': {'mapping': 'map.txt',
'restarts': 2,
'draws_per_restart': 3,
'burnin': 25,
'delay': 2,
'loo': False,
'source_sink_column': 'SourceSink',
'source_column_value': 'source',
'sink_column_value': 'sink',
'source_category_column': 'Env',
'sink_rarefaction_depth': 1000,
'source_rarefaction_depth': 1000},
'example5': {'mapping': 'map.txt',
'restarts': 2,
'draws_per_restart': 3,
'burnin': 10,
'delay': 2,
'loo': False,
'source_sink_column': 'SourceSink',
'source_column_value': 'source',
'sink_column_value': 'sink',
'source_category_column': 'Env',
'sink_rarefaction_depth': 1700,
'source_rarefaction_depth': 1500}}
def test_q2_gibbs(self):
"""Tests that the Q2 and standalone gibbs results match.
Also validates against ground truth "expected" results.
"""
crnt_dir = os.path.dirname(os.path.abspath(__file__))
tst_pth = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, os.pardir)
        # test the CLI for each example parameter set
for exmp_i, perams in self.examples.items():
            # get the input table, taxonomy, and mapping file paths
tbl_pth = os.path.join(tst_pth, 'data/tiny-test/otu_table.biom')
tax_pth = os.path.join(tst_pth, 'data/tiny-test/taxonomy.qza')
mta_pth = os.path.join(
tst_pth, 'data/tiny-test', perams['mapping'])
# import tables
q2table = Artifact.import_data("FeatureTable[Frequency]",
load_table(tbl_pth))
q2tax = Artifact.load(tax_pth)
q2meta = Metadata(pd.read_csv(mta_pth,
sep='\t',
index_col=0))
            # Run SourceTracker 2 through QIIME 2 (specifically, the Artifact API)
            # save a few parameters as variables to avoid long lines
rs_ = perams['source_rarefaction_depth']
rss_ = perams['sink_rarefaction_depth']
scv_ = perams['source_column_value']
scc_ = perams['source_category_column']
draw_ = perams['draws_per_restart']
ssc_ = perams['source_sink_column']
sincv_ = perams['sink_column_value']
mp, mpstd, fas, fasmf = gibbs(q2table,
q2meta,
loo=perams['loo'],
source_rarefaction_depth=rs_,
sink_rarefaction_depth=rss_,
restarts=perams['restarts'],
draws_per_restart=draw_,
burnin=perams['burnin'],
delay=perams['delay'],
source_sink_column=ssc_,
source_column_value=scv_,
sink_column_value=sincv_,
source_category_column=scc_)
# run prop barplot
with tempfile.TemporaryDirectory() as output_dir:
barplot(output_dir,
mp.view(pd.DataFrame),
q2meta,
scc_)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
# run a per-sink prop
if perams['loo']:
per_ = 'drainwater'
else:
per_ = 's0'
with tempfile.TemporaryDirectory() as output_dir:
assignment_barplot(output_dir,
fas.view(pd.DataFrame),
q2tax.view(pd.DataFrame),
fasmf.view(pd.DataFrame),
per_)
index_fp = os.path.join(output_dir, 'index.html')
self.assertTrue(os.path.exists(index_fp))
# Get the underlying data from these artifacts
res_mp = mp.view(Table).to_dataframe().T
# check mixing proportions from cli
exp_pth = os.path.join(crnt_dir,
os.pardir,
os.pardir,
'_cli',
'tests',
'data',
'exp_' + exmp_i,
'mixing_proportions.txt')
exp_mp = pd.read_csv(exp_pth, sep='\t', index_col=0).T
# compare the results
assert_allclose(exp_mp,
res_mp.loc[exp_mp.index,
exp_mp.columns],
atol=.50)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
simonwydooghe/ansible | test/units/modules/network/fortios/test_fortios_log_syslogd_filter.py | 21 | 9184 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_syslogd_filter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_syslogd_filter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_syslogd_filter_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_syslogd_filter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_syslogd_filter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_syslogd_filter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_filter': {
'random_attribute_not_valid': 'tag',
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
citrix-openstack-build/nova | nova/virt/hyperv/vmops.py | 4 | 16960 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import functools
import os
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='qemu-img is used to convert between '
'different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive'),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
help='Enables metrics collections for an instance by using '
'Hyper-V\'s metric APIs. Collected data can by retrieved '
'by other apps and services, e.g.: Ceilometer. '
'Requires Hyper-V / Windows Server 2012 and above'),
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help='Enables dynamic memory allocation (ballooning) when '
'set to a value greater than 1. The value expresses '
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
'implies 512MB of RAM allocated at startup')
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
class VMOps(object):
_vif_driver_class_map = {
'nova.network.neutronv2.api.API':
'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
'nova.network.api.API':
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._vif_driver = None
self._load_vif_driver_class()
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
instance_name = instance['name']
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return {'state': state,
'max_mem': info['MemoryUsage'],
'mem': info['MemoryUsage'],
'num_cpu': info['NumberOfProcessors'],
'cpu_time': info['UpTime']}
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'],
format_ext)
try:
if CONF.use_cow_images:
LOG.debug(_("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
else:
LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['FileSize']
root_vhd_size = instance['root_gb'] * 1024 ** 3
if root_vhd_size < base_vhd_size:
raise vmutils.HyperVException(_("Cannot resize a VHD to a "
"smaller size"))
elif root_vhd_size > base_vhd_size:
LOG.debug(_("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._vhdutils.resize_vhd(root_vhd_path, root_vhd_size)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * 1024 ** 3
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance['name'], vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size,
vhd_format)
return eph_vhd_path
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_("Spawning new instance"), instance=instance)
instance_name = instance['name']
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._create_root_vhd(context, instance)
eph_vhd_path = self.create_ephemeral_vhd(instance)
try:
self.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
self.power_on(instance)
except Exception as ex:
LOG.exception(ex)
self.destroy(instance)
raise vmutils.HyperVException(_('Spawn instance failed'))
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path):
instance_name = instance['name']
self._vmutils.create_vm(instance_name,
instance['memory_mb'],
instance['vcpus'],
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio)
ctrl_disk_addr = 0
if root_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
root_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
ctrl_disk_addr += 1
if eph_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
eph_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
self._vmutils.create_scsi_controller(instance_name)
self._volumeops.attach_volumes(block_device_info,
instance_name,
root_vhd_path is None)
for vif in network_info:
LOG.debug(_('Creating nic for instance: %s'), instance_name)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
self._vmutils.enable_vm_metrics_collection(instance_name)
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
            raise vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
                                          CONF.config_drive_format)
        LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
instance_path = self._pathutils.get_instance_dir(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
1, 0, drive_type)
def _disconnect_volumes(self, volume_drives):
for volume_drive in volume_drives:
self._volumeops.disconnect_volume(volume_drive)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance['name']
LOG.info(_("Got request to destroy instance: %s"), instance_name)
try:
if self._vmutils.vm_exists(instance_name):
                # Stop the VM first.
self.power_off(instance)
storage = self._vmutils.get_vm_storage_paths(instance_name)
(disk_files, volume_drives) = storage
self._vmutils.destroy_vm(instance_name)
self._disconnect_volumes(volume_drives)
else:
LOG.debug(_("Instance not found: %s"), instance_name)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception as ex:
LOG.exception(ex)
raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug(_("reboot instance"), instance=instance)
self._set_vm_state(instance['name'],
constants.HYPERV_VM_STATE_REBOOT)
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug(_("Suspend instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance):
"""Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, vm_name, req_state):
try:
self._vmutils.set_vm_state(vm_name, req_state)
LOG.debug(_("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
except Exception as ex:
LOG.exception(ex)
msg = (_("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") %
{'vm_name': vm_name, 'req_state': req_state})
raise vmutils.HyperVException(msg)
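# --- Editor's illustrative sketch; not part of the original nova module ---
# A hedged demonstration of the check_admin_permissions decorator defined above.
# The fake classes are invented stand-ins so the check-then-delegate pattern can
# be exercised without a Hyper-V host; real callers go through VMOps instead.
if __name__ == '__main__':
    class _FakeVMUtils(object):
        def check_admin_permissions(self):
            print('permission check ran before the wrapped method')
    class _Demo(object):
        def __init__(self):
            self._vmutils = _FakeVMUtils()
        @check_admin_permissions
        def do_work(self):
            return 'work done'
    print(_Demo().do_work())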
| apache-2.0 |
stscieisenhamer/glue | glue/plugins/tools/spectrum_tool/qt/tests/test_spectrum_tool.py | 1 | 8521 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from mock import MagicMock
from glue.core.fitters import PolynomialFitter
from glue.core.roi import RectangularROI
from glue.core import Data, Coordinates
from glue.core.tests.util import simple_session
from glue.tests.helpers import requires_astropy
from glue.viewers.image.qt import ImageViewer
from ..spectrum_tool import Extractor, ConstraintsWidget, FitSettingsWidget, SpectrumTool, CollapseContext
needs_modeling = lambda x: x
try:
from glue.core.fitters import SimpleAstropyGaussianFitter
except ImportError:
needs_modeling = pytest.mark.skipif(True, reason='Needs astropy >= 0.3')
class MockCoordinates(Coordinates):
def pixel2world(self, *args):
return [a * 2 for a in args]
def world2pixel(self, *args):
return [a / 2 for a in args]
class BaseTestSpectrumTool:
def setup_data(self):
self.data = Data(x=np.zeros((3, 3, 3)))
def setup_method(self, method):
self.setup_data()
session = simple_session()
session.data_collection.append(self.data)
self.image = ImageViewer(session)
self.image.add_data(self.data)
self.image.data = self.data
self.image.attribute = self.data.id['x']
self.mode = self.image.toolbar.tools['spectrum']
self.tool = self.mode._tool
self.tool.show = lambda *args: None
def teardown_method(self, method):
if self.image is not None:
self.image.close()
self.image = None
if self.tool is not None:
self.tool.close()
self.tool = None
class TestSpectrumTool(BaseTestSpectrumTool):
def build_spectrum(self):
roi = RectangularROI()
roi.update_limits(0, 2, 0, 2)
self.tool._update_profile()
def test_reset_on_view_change(self):
self.build_spectrum()
self.tool.widget = MagicMock()
self.tool.widget.isVisible.return_value = True
self.tool.hide = MagicMock()
self.image.state.x_att_world = self.data.world_component_ids[0]
assert self.tool.hide.call_count > 0
# For some reason we need to close and dereference the image and tool
# here (and not in teardown_method) otherwise we are left with
# references to the image viewer.
self.image.close()
self.image = None
self.tool.close()
self.tool = None
class Test3DExtractor(object):
def setup_method(self, method):
self.data = Data()
self.data.coords = MockCoordinates()
self.data.add_component(np.random.random((3, 4, 5)), label='x')
self.x = self.data['x']
def test_abcissa(self):
expected = [0, 2, 4]
actual = Extractor.abcissa(self.data, 0)
np.testing.assert_equal(expected, actual)
expected = [0, 2, 4, 6]
actual = Extractor.abcissa(self.data, 1)
np.testing.assert_equal(expected, actual)
expected = [0, 2, 4, 6, 8]
actual = Extractor.abcissa(self.data, 2)
np.testing.assert_equal(expected, actual)
def test_spectrum(self):
roi = RectangularROI()
roi.update_limits(0.5, 1.5, 2.5, 2.5)
expected = self.x[:, 1:3, 2:3].mean(axis=1).mean(axis=1)
_, actual = Extractor.spectrum(
self.data, self.data.id['x'], roi, (0, 'x', 'y'), 0)
np.testing.assert_array_almost_equal(expected, actual)
def test_spectrum_oob(self):
roi = RectangularROI()
roi.update_limits(-1, -1, 3, 3)
expected = self.x[:, :3, :3].mean(axis=1).mean(axis=1)
_, actual = Extractor.spectrum(self.data, self.data.id['x'],
roi, (0, 'x', 'y'), 0)
np.testing.assert_array_almost_equal(expected, actual)
def test_pixel2world(self):
# p2w(x) = 2x, 0 <= x <= 2
assert Extractor.pixel2world(self.data, 0, 1) == 2
# clips to boundary
assert Extractor.pixel2world(self.data, 0, -1) == 0
assert Extractor.pixel2world(self.data, 0, 5) == 4
def test_world2pixel(self):
# w2p(x) = x/2, 0 <= x <= 4
assert Extractor.world2pixel(self.data, 0, 2.01) == 1
# clips to boundary
assert Extractor.world2pixel(self.data, 0, -1) == 0
assert Extractor.world2pixel(self.data, 0, 8) == 2
def test_extract_subset(self):
sub = self.data.new_subset()
sub.subset_state = self.data.id['x'] > .5
slc = (0, 'y', 'x')
mask = sub.to_mask()[0]
mask = mask.reshape(-1, mask.shape[0], mask.shape[1])
expected = (self.x * mask).sum(axis=1).sum(axis=1)
expected /= mask.sum(axis=1).sum(axis=1)
_, actual = Extractor.subset_spectrum(sub, self.data.id['x'],
slc, 0)
np.testing.assert_array_almost_equal(expected, actual)
class Test4DExtractor(object):
def setup_method(self, method):
self.data = Data()
self.data.coords = MockCoordinates()
x, y, z, w = np.mgrid[:3, :4, :5, :4]
self.data.add_component(1. * w, label='x')
def test_extract(self):
roi = RectangularROI()
roi.update_limits(0, 0, 2, 3)
expected = self.data['x'][:, :2, :3, 1].mean(axis=1).mean(axis=1)
_, actual = Extractor.spectrum(self.data, self.data.id['x'],
roi, (0, 'x', 'y', 1), 0)
np.testing.assert_array_equal(expected, actual)
class TestConstraintsWidget(object):
def setup_method(self, method):
self.constraints = dict(a=dict(fixed=True, value=1, limits=None))
self.widget = ConstraintsWidget(self.constraints)
def test_settings(self):
assert self.widget.settings('a') == dict(fixed=True, value=1,
limits=None)
def test_update_settings(self):
self.widget._widgets['a'][2].setChecked(False)
assert self.widget.settings('a')['fixed'] is False
def test_update_constraints(self):
self.widget._widgets['a'][2].setChecked(False)
fitter = MagicMock()
self.widget.update_constraints(fitter)
fitter.set_constraint.assert_called_once_with('a',
fixed=False, value=1,
limits=None)
class TestFitSettingsWidget(object):
def test_option(self):
f = PolynomialFitter()
f.degree = 1
w = FitSettingsWidget(f)
w.widgets['degree'].setValue(5)
w.update_fitter_from_settings()
assert f.degree == 5
@needs_modeling
def test_set_constraints(self):
f = SimpleAstropyGaussianFitter()
w = FitSettingsWidget(f)
w.constraints._widgets['amplitude'][2].setChecked(True)
w.update_fitter_from_settings()
assert f.constraints['amplitude']['fixed']
def test_4d_single_channel():
x = np.random.random((1, 7, 5, 9))
d = Data(x=x)
slc = (0, 0, 'x', 'y')
zaxis = 1
expected = x[0, :, :, :].mean(axis=1).mean(axis=1)
roi = RectangularROI()
roi.update_limits(-0.5, -0.5, 10.5, 10.5)
_, actual = Extractor.spectrum(d, d.id['x'], roi, slc, zaxis)
np.testing.assert_array_almost_equal(expected, actual)
@requires_astropy
class TestCollapseContext(BaseTestSpectrumTool):
def test_collapse(self, tmpdir):
roi = RectangularROI()
roi.update_limits(0, 2, 0, 2)
self.tool._update_profile()
self._save(tmpdir)
# For some reason we need to close and dereference the image and tool
# here (and not in teardown_method) otherwise we are left with
# references to the image viewer.
self.image.close()
self.image = None
self.tool.close()
self.tool = None
def _save(self, tmpdir):
for context in self.tool._contexts:
if isinstance(context, CollapseContext):
break
else:
raise ValueError("Could not find collapse context")
context.save_to(tmpdir.join('test.fits').strpath)
@requires_astropy
class TestCollapseContextWCS(TestCollapseContext):
def setup_data(self):
from glue.core.coordinates import coordinates_from_wcs
from astropy.wcs import WCS
wcs = WCS(naxis=3)
self.data = Data(x=np.zeros((3, 3, 3)))
self.data.coords = coordinates_from_wcs(wcs)
| bsd-3-clause |
MostafaGazar/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/densify.py | 22 | 1982 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms Sparse to Dense Tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import sparse_ops
class Densify(transform.TensorFlowTransform):
"""Transforms Sparse to Dense Tensor."""
def __init__(self,
default_value):
super(Densify, self).__init__()
self._default_value = default_value
@transform.parameter
def default_value(self):
return self._default_value
@property
def name(self):
return "Densify"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
"""Applies the transformation to the `transform_input`.
Args:
input_tensors: a list of Tensors representing the input to
the Transform.
**kwargs: Additional keyword arguments, unused here.
Returns:
A namedtuple of Tensors representing the transformed output.
"""
s = input_tensors[0]
# pylint: disable=not-callable
return self.return_type(sparse_ops.sparse_to_dense(
s.indices, s.shape, s.values, default_value=self.default_value))
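# Illustrative sketch, not part of the original module: given a SparseTensor `st`
# produced upstream (an assumption for this example), the transform above boils down
# to the same call it wraps, e.g.
#   dense = sparse_ops.sparse_to_dense(st.indices, st.shape, st.values,
#                                      default_value=0)
# with `st` and the default value of 0 chosen purely for illustration.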
| apache-2.0 |
haylesr/angr | angr/analyses/loopfinder.py | 1 | 4933 | import networkx
import logging
from ..analysis import Analysis, register_analysis
l = logging.getLogger('angr.analyses.loops')
class Loop(object):
def __init__(self, entry, entry_edges, break_edges, continue_edges, body_nodes, graph, subloops):
self.entry = entry
self.entry_edges = entry_edges
self.break_edges = break_edges
self.continue_edges = continue_edges
self.body_nodes = body_nodes
self.graph = graph
self.subloops = subloops
self.has_calls = any(map(lambda loop: loop.has_calls, subloops))
if not self.has_calls:
for _, _, data in self.graph.edges_iter(data=True):
if 'type' in data and data['type'] == 'fake_return':
# this is a function call.
self.has_calls = True
break
class LoopFinder(Analysis):
"""
Extracts all the loops from all the functions in a binary.
"""
def __init__(self, functions=None):
if functions is None:
functions = self.kb.functions.itervalues()
found_any = False
self.loops = []
for function in functions:
found_any = True
with self._resilience():
function.normalize()
self.loops += self._parse_loops_from_graph(function.graph)
if not found_any:
l.error("No knowledge of functions is present. Did you forget to construct a CFG?")
def _parse_loop_graph(self, subg, bigg):
"""
Create a Loop object for a strongly connected graph, and any strongly
connected subgraphs, if possible.
:param subg: A strongly connected subgraph.
:param bigg: The graph which subg is a subgraph of.
:return: A list of Loop objects, some of which may be inside others,
but all need to be documented.
"""
loop_body_nodes = subg.nodes()[:]
entry_edges = []
break_edges = []
continue_edges = []
entry_node = None
for node in loop_body_nodes:
for pred_node in bigg.predecessors(node):
if pred_node not in loop_body_nodes:
if entry_node is not None and entry_node != node:
l.warning("Bad loop: more than one entry point (%#x, %#x)", entry_node, node)
return []
entry_node = node
entry_edges.append((pred_node, node))
subg.add_edge(pred_node, node)
for succ_node in bigg.successors(node):
if succ_node not in loop_body_nodes:
break_edges.append((node, succ_node))
subg.add_edge(node, succ_node)
if entry_node is None:
entry_node = min(loop_body_nodes, key=lambda n: n.addr)
l.info("Couldn't find entry point, assuming it's the first by address (%#x)", entry_node)
acyclic_subg = subg.copy()
for pred_node in subg.predecessors(entry_node):
if pred_node in loop_body_nodes:
continue_edge = (pred_node, entry_node)
acyclic_subg.remove_edge(*continue_edge)
continue_edges.append(continue_edge)
subloops = self._parse_loops_from_graph(acyclic_subg)
for subloop in subloops:
if subloop.entry in loop_body_nodes:
# break existing entry edges, exit edges
# re-link in loop object
for entry_edge in subloop.entry_edges:
subg.remove_edge(*entry_edge)
subg.add_edge(entry_edge[0], subloop)
for exit_edge in subloop.break_edges:
subg.remove_edge(*exit_edge)
subg.add_edge(subloop, exit_edge[1])
subg = filter(lambda g: entry_node in g.nodes(),
networkx.weakly_connected_component_subgraphs(subg))[0]
subloops.append(Loop(entry_node,
entry_edges,
break_edges,
continue_edges,
loop_body_nodes,
subg,
subloops[:]))
return subloops
def _parse_loops_from_graph(self, graph):
"""
Return all Loop instances that can be extracted from a graph.
:param graph: The graph to analyze.
:return: A list of all the Loop instances that were found in the graph.
"""
out = []
for subg in networkx.strongly_connected_component_subgraphs(graph):
if len(subg.nodes()) == 1:
if len(subg.successors(subg.nodes()[0])) == 0:
continue
out += self._parse_loop_graph(subg, graph)
return out
register_analysis(LoopFinder, 'LoopFinder')
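# Illustrative usage sketch (assumptions, not part of this module): given an angr
# Project `proj`, function knowledge is normally built first and the analysis
# registered above is then run through the project, e.g.
#   proj.analyses.CFG()              # populates proj.kb.functions
#   lf = proj.analyses.LoopFinder()
#   for loop in lf.loops:
#       print loop.entry, len(loop.body_nodes), loop.has_calls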
| bsd-2-clause |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib-python/2.7/ctypes/test/test_macholib.py | 36 | 1798 | import os
import sys
import unittest
# Bob Ippolito:
"""
Ok.. the code to find the filename for __getattr__ should look
something like:
import os
from macholib.dyld import dyld_find
def find_lib(name):
possible = ['lib'+name+'.dylib', name+'.dylib',
name+'.framework/'+name]
for dylib in possible:
try:
return os.path.realpath(dyld_find(dylib))
except ValueError:
pass
raise ValueError, "%s not found" % (name,)
It'll have output like this:
>>> find_lib('pthread')
'/usr/lib/libSystem.B.dylib'
>>> find_lib('z')
'/usr/lib/libz.1.dylib'
>>> find_lib('IOKit')
'/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit'
-bob
"""
from ctypes.macholib.dyld import dyld_find
def find_lib(name):
possible = ['lib'+name+'.dylib', name+'.dylib', name+'.framework/'+name]
for dylib in possible:
try:
return os.path.realpath(dyld_find(dylib))
except ValueError:
pass
raise ValueError("%s not found" % (name,))
class MachOTest(unittest.TestCase):
@unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test')
def test_find(self):
self.assertEqual(find_lib('pthread'),
'/usr/lib/libSystem.B.dylib')
result = find_lib('z')
# Issue #21093: dyld default search path includes $HOME/lib and
# /usr/local/lib before /usr/lib, which caused test failures if
# a local copy of libz exists in one of them. Now ignore the head
# of the path.
self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib")
self.assertEqual(find_lib('IOKit'),
'/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit')
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
esrf-emotion/emotion | tests/TestCustomCommandsAttributes.py | 2 | 1904 | import unittest
import sys
import os
sys.path.insert(
0,
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..")))
import emotion
config_xml = """
<config>
<controller class="mockup" name="test">
<host value="mydummyhost1"/>
<port value="5000"/>
<axis name="robz">
<!-- degrees per second -->
<velocity value="100"/>
</axis>
</controller>
<controller class="mockup">
<host value="mydummyhost2"/>
<port value="5000"/>
<axis name="roby">
<backlash value="2"/>
<steps_per_unit value="10"/>
<velocity value="2500"/>
</axis>
</controller>
</config>
"""
class TestMockupController(unittest.TestCase):
def setUp(self):
emotion.load_cfg_fromstring(config_xml)
def test_get_axis(self):
robz = emotion.get_axis("robz")
self.assertTrue(robz)
def test_get_custom_methods_list(self):
robz = emotion.get_axis("robz")
print "\ncustom functions :"
for (fname, types) in robz.custom_methods_list:
print fname, types
def test_custom_park(self):
robz = emotion.get_axis("robz")
robz.custom_park()
def test_custom_get_forty_two(self):
robz = emotion.get_axis("robz")
print robz.custom_get_forty_two()
def test_custom_get_twice(self):
robz = emotion.get_axis("robz")
self.assertEqual(robz.custom_get_twice(42), 84)
def test_custom_get_chapi(self):
robz = emotion.get_axis("robz")
self.assertEqual(robz.custom_get_chapi("chapi"), "chapo")
self.assertEqual(robz.custom_get_chapi("titi"), "toto")
self.assertEqual(robz.custom_get_chapi("roooh"), "bla")
def test_custom_send_command(self):
robz = emotion.get_axis("robz")
robz.custom_send_command("SALUT sent")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
dkubiak789/odoo | addons/l10n_it/__openerp__.py | 267 | 1992 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italy - Accounting',
'version': '0.2',
'depends': ['base_vat','account_chart','base_iban'],
'author': 'OpenERP Italian Community',
'description': """
Piano dei conti italiano di un'impresa generica.
================================================
Italian accounting chart and localization.
""",
'license': 'AGPL-3',
'category': 'Localization/Account Charts',
'website': 'http://www.openerp-italia.org/',
'data': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'account_chart.xml',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'l10n_chart_it_generic.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
GoUbiq/pyexchange | tests/exchange2010/test_get_event.py | 5 | 16481 | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from httpretty import HTTPretty, httprettified, activate
import unittest
from pytest import raises
from pyexchange import Exchange2010Service
from pyexchange.connection import ExchangeNTLMAuthConnection
from pyexchange.exceptions import * # noqa
from .fixtures import * # noqa
class Test_ParseEventResponseData(unittest.TestCase):
event = None
@classmethod
def setUpClass(cls):
@activate # this decorator doesn't play nice with @classmethod
def fake_event_request():
service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL, username=FAKE_EXCHANGE_USERNAME, password=FAKE_EXCHANGE_PASSWORD
)
)
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_ITEM_RESPONSE.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
return service.calendar().get_event(id=TEST_EVENT.id)
cls.event = fake_event_request()
def test_canary(self):
assert self.event is not None
def test_event_id_was_not_changed(self):
assert self.event.id == TEST_EVENT.id
def test_event_has_a_subject(self):
assert self.event.subject == TEST_EVENT.subject
def test_event_has_a_location(self):
assert self.event.location == TEST_EVENT.location
def test_event_has_a_body(self):
assert self.event.html_body == TEST_EVENT.body
assert self.event.text_body == TEST_EVENT.body
assert self.event.body == TEST_EVENT.body
def test_event_starts_at_the_right_time(self):
assert self.event.start == TEST_EVENT.start
def test_event_ends_at_the_right_time(self):
assert self.event.end == TEST_EVENT.end
def test_event_has_an_organizer(self):
assert self.event.organizer is not None
assert self.event.organizer.name == ORGANIZER.name
assert self.event.organizer.email == ORGANIZER.email
def test_event_has_the_correct_attendees(self):
assert len(self.event.attendees) > 0
assert len(self.event.attendees) == len(ATTENDEE_LIST)
def _test_person_values_are_correct(self, fixture):
try:
self.event.attendees.index(fixture)
except ValueError as e:
print(u"An attendee should be in the list but isn't:", fixture)
raise e
def test_all_attendees_are_present_and_accounted_for(self):
# this is a nose test generator if you haven't seen one before
# it creates one test for each attendee
for attendee in ATTENDEE_LIST:
yield self._test_person_values_are_correct, attendee
def test_resources_are_correct(self):
assert self.event.resources == [RESOURCE]
def test_conference_room_alias(self):
assert self.event.conference_room == RESOURCE
def test_required_attendees_are_required(self):
assert sorted(self.event.required_attendees) == sorted(REQUIRED_PEOPLE)
def test_optional_attendees_are_optional(self):
assert sorted(self.event.optional_attendees) == sorted(OPTIONAL_PEOPLE)
def test_conflicting_event_ids(self):
assert self.event.conflicting_event_ids[0] == TEST_CONFLICT_EVENT.id
@httprettified
def test_conflicting_events(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=CONFLICTING_EVENTS_RESPONSE.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
conflicting_events = self.event.conflicting_events()
assert conflicting_events[0].id == TEST_CONFLICT_EVENT.id
assert conflicting_events[0].calendar_id == TEST_CONFLICT_EVENT.calendar_id
assert conflicting_events[0].subject == TEST_CONFLICT_EVENT.subject
assert conflicting_events[0].location == TEST_CONFLICT_EVENT.location
assert conflicting_events[0].start == TEST_CONFLICT_EVENT.start
assert conflicting_events[0].end == TEST_CONFLICT_EVENT.end
assert conflicting_events[0].body == TEST_CONFLICT_EVENT.body
assert conflicting_events[0].conflicting_event_ids[0] == TEST_EVENT.id
class Test_FailingToGetEvents(unittest.TestCase):
service = None
@classmethod
def setUpClass(cls):
cls.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL, username=FAKE_EXCHANGE_USERNAME, password=FAKE_EXCHANGE_PASSWORD
)
)
@activate
def test_requesting_an_event_id_that_doest_exist_throws_exception(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=ITEM_DOES_NOT_EXIST.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
with raises(ExchangeItemNotFoundException):
self.service.calendar().get_event(id=TEST_EVENT.id)
@activate
def test_requesting_an_event_and_getting_a_500_response_throws_exception(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=u"",
status=500,
content_type='text/xml; charset=utf-8',
)
with raises(FailedExchangeException):
self.service.calendar().get_event(id=TEST_EVENT.id)
@activate
def test_requesting_an_event_and_getting_garbage_xml_throws_exception(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=u"<garbage xml",
status=200,
content_type='text/xml; charset=utf-8',
)
with raises(FailedExchangeException):
self.service.calendar().get_event(id=TEST_EVENT.id)
class Test_GetRecurringMasterEvents(unittest.TestCase):
service = None
event = None
@classmethod
def setUpClass(cls):
cls.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD
)
)
@httprettified
def test_get_recurring_daily_event(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_DAILY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
event = self.service.calendar(id=TEST_RECURRING_EVENT_DAILY.calendar_id).get_event(
id=TEST_RECURRING_EVENT_DAILY.id
)
assert event.id == TEST_RECURRING_EVENT_DAILY.id
assert event.calendar_id == TEST_RECURRING_EVENT_DAILY.calendar_id
assert event.subject == TEST_RECURRING_EVENT_DAILY.subject
assert event.location == TEST_RECURRING_EVENT_DAILY.location
assert event.start == TEST_RECURRING_EVENT_DAILY.start
assert event.end == TEST_RECURRING_EVENT_DAILY.end
assert event.body == TEST_RECURRING_EVENT_DAILY.body
assert event.html_body == TEST_RECURRING_EVENT_DAILY.body
assert event.recurrence == 'daily'
assert event.recurrence_interval == TEST_RECURRING_EVENT_DAILY.recurrence_interval
assert event.recurrence_end_date == TEST_RECURRING_EVENT_DAILY.recurrence_end_date
@httprettified
def test_get_recurring_weekly_event(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_WEEKLY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
event = self.service.calendar(id=TEST_RECURRING_EVENT_WEEKLY.calendar_id).get_event(
id=TEST_RECURRING_EVENT_WEEKLY.id
)
assert event.id == TEST_RECURRING_EVENT_WEEKLY.id
assert event.calendar_id == TEST_RECURRING_EVENT_WEEKLY.calendar_id
assert event.subject == TEST_RECURRING_EVENT_WEEKLY.subject
assert event.location == TEST_RECURRING_EVENT_WEEKLY.location
assert event.start == TEST_RECURRING_EVENT_WEEKLY.start
assert event.end == TEST_RECURRING_EVENT_WEEKLY.end
assert event.body == TEST_RECURRING_EVENT_WEEKLY.body
assert event.html_body == TEST_RECURRING_EVENT_WEEKLY.body
assert event.recurrence == 'weekly'
assert event.recurrence_interval == TEST_RECURRING_EVENT_WEEKLY.recurrence_interval
assert event.recurrence_end_date == TEST_RECURRING_EVENT_WEEKLY.recurrence_end_date
@httprettified
def test_get_recurring_monthly_event(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_MONTHLY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
event = self.service.calendar(id=TEST_RECURRING_EVENT_MONTHLY.calendar_id).get_event(
id=TEST_RECURRING_EVENT_MONTHLY.id
)
assert event.id == TEST_RECURRING_EVENT_MONTHLY.id
assert event.calendar_id == TEST_RECURRING_EVENT_MONTHLY.calendar_id
assert event.subject == TEST_RECURRING_EVENT_MONTHLY.subject
assert event.location == TEST_RECURRING_EVENT_MONTHLY.location
assert event.start == TEST_RECURRING_EVENT_MONTHLY.start
assert event.end == TEST_RECURRING_EVENT_MONTHLY.end
assert event.body == TEST_RECURRING_EVENT_MONTHLY.body
assert event.html_body == TEST_RECURRING_EVENT_MONTHLY.body
assert event.recurrence == 'monthly'
assert event.recurrence_interval == TEST_RECURRING_EVENT_MONTHLY.recurrence_interval
assert event.recurrence_end_date == TEST_RECURRING_EVENT_MONTHLY.recurrence_end_date
@httprettified
def test_get_recurring_yearly_event(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_YEARLY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
event = self.service.calendar(id=TEST_RECURRING_EVENT_YEARLY.calendar_id).get_event(
id=TEST_RECURRING_EVENT_YEARLY.id
)
assert event.id == TEST_RECURRING_EVENT_YEARLY.id
assert event.calendar_id == TEST_RECURRING_EVENT_YEARLY.calendar_id
assert event.subject == TEST_RECURRING_EVENT_YEARLY.subject
assert event.location == TEST_RECURRING_EVENT_YEARLY.location
assert event.start == TEST_RECURRING_EVENT_YEARLY.start
assert event.end == TEST_RECURRING_EVENT_YEARLY.end
assert event.body == TEST_RECURRING_EVENT_YEARLY.body
assert event.html_body == TEST_RECURRING_EVENT_YEARLY.body
assert event.recurrence == 'yearly'
assert event.recurrence_end_date == TEST_RECURRING_EVENT_YEARLY.recurrence_end_date
class Test_GetOccurence(unittest.TestCase):
service = None
event = None
@classmethod
@httprettified
def setUpClass(self):
self.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD
)
)
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_DAILY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
self.event = self.service.calendar(id=TEST_RECURRING_EVENT_DAILY.calendar_id).get_event(
id=TEST_RECURRING_EVENT_DAILY.id
)
@httprettified
def test_get_daily_event_occurrences(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_DAILY_OCCURRENCES.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
occurrences = self.event.get_occurrence(range(5))
for occ in range(len(occurrences)):
assert occurrences[occ].id == TEST_EVENT_DAILY_OCCURRENCES[occ].id
assert occurrences[occ].subject == TEST_EVENT_DAILY_OCCURRENCES[occ].subject
assert occurrences[occ].location == TEST_EVENT_DAILY_OCCURRENCES[occ].location
assert occurrences[occ].start == TEST_EVENT_DAILY_OCCURRENCES[occ].start
assert occurrences[occ].end == TEST_EVENT_DAILY_OCCURRENCES[occ].end
assert occurrences[occ].body == TEST_EVENT_DAILY_OCCURRENCES[occ].body
assert occurrences[occ].calendar_id == TEST_EVENT_DAILY_OCCURRENCES[occ].calendar_id
assert occurrences[occ].type == 'Occurrence'
@httprettified
def test_get_daily_event_occurrences_fail_from_occurrence(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_DAILY_OCCURRENCES.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
occurrences = self.event.get_occurrence(range(5))
for occ in range(len(occurrences)):
with raises(InvalidEventType):
occurrences[occ].get_occurrence(range(5))
@httprettified
def test_get_daily_event_occurrences_empty(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_EMPTY_OCCURRENCES.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
occurrences = self.event.get_occurrence(range(5))
assert type(occurrences) == list
assert len(occurrences) == 0
class Test_InvalidEventTypeFromSingle(unittest.TestCase):
service = None
event = None
@classmethod
@httprettified
def setUpClass(self):
self.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD
)
)
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_ITEM_RESPONSE.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
self.event = self.service.calendar().get_event(
id=TEST_EVENT.id
)
def test_get_daily_event_occurrences_fail(self):
with raises(InvalidEventType):
self.event.get_occurrence(range(5))
def test_get_daily_event_master_fail(self):
with raises(InvalidEventType):
self.event.get_master()
class Test_GetMaster(unittest.TestCase):
service = None
event = None
@classmethod
@httprettified
def setUpClass(self):
self.service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL,
username=FAKE_EXCHANGE_USERNAME,
password=FAKE_EXCHANGE_PASSWORD
)
)
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_EVENT_OCCURRENCE.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
self.event = self.service.calendar().get_event(
id=TEST_EVENT_DAILY_OCCURRENCES[0].id
)
@httprettified
def test_get_master_success(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_DAILY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
master = self.event.get_master()
assert master.id == TEST_RECURRING_EVENT_DAILY.id
assert master.calendar_id == TEST_RECURRING_EVENT_DAILY.calendar_id
assert master.subject == TEST_RECURRING_EVENT_DAILY.subject
assert master.location == TEST_RECURRING_EVENT_DAILY.location
assert master.start == TEST_RECURRING_EVENT_DAILY.start
assert master.end == TEST_RECURRING_EVENT_DAILY.end
assert master.body == TEST_RECURRING_EVENT_DAILY.body
assert master.html_body == TEST_RECURRING_EVENT_DAILY.body
assert master.recurrence == 'daily'
assert master.recurrence_interval == TEST_RECURRING_EVENT_DAILY.recurrence_interval
assert master.recurrence_end_date == TEST_RECURRING_EVENT_DAILY.recurrence_end_date
@httprettified
def test_get_master_fail_from_master(self):
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_DAILY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
master = self.event.get_master()
with raises(InvalidEventType):
master.get_master()
class Test_GetConflictingEventsEmpty(unittest.TestCase):
event = None
@classmethod
def setUpClass(self):
@activate # this decorator doesn't play nice with @classmethod
def fake_event_request():
service = Exchange2010Service(
connection=ExchangeNTLMAuthConnection(
url=FAKE_EXCHANGE_URL, username=FAKE_EXCHANGE_USERNAME, password=FAKE_EXCHANGE_PASSWORD
)
)
HTTPretty.register_uri(
HTTPretty.POST, FAKE_EXCHANGE_URL,
body=GET_RECURRING_MASTER_DAILY_EVENT.encode('utf-8'),
content_type='text/xml; charset=utf-8',
)
return service.calendar().get_event(id=TEST_EVENT.id)
self.event = fake_event_request()
def test_conflicting_event_ids_empty(self):
assert len(self.event.conflicting_event_ids) == 0
def test_conflicting_events_empty(self):
assert len(self.event.conflicting_events()) == 0
| apache-2.0 |
Y3K/django | django/contrib/gis/geos/libgeos.py | 345 | 6218 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (which get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
return _lgeos
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory(object):
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, **kwargs):
self.func_name = func_name
self.restype = kwargs.pop('restype', self.restype)
self.errcheck = kwargs.pop('errcheck', self.errcheck)
self.argtypes = kwargs.pop('argtypes', self.argtypes)
self.args = args
self.kwargs = kwargs
self.func = None
def __call__(self, *args, **kwargs):
if self.func is None:
self.func = self.get_func(*self.args, **self.kwargs)
return self.func(*args, **kwargs)
def get_func(self, *args, **kwargs):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
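# Illustrative example, not part of the original module: for a version string such as
# '3.4.0dev-CAPI-1.8.0' (one of the forms listed above), geos_version_info() returns
#   {'version': '3.4.0', 'major': '3', 'minor': '4', 'subminor': '0',
#    'release_candidate': None, 'capi_version': '1.8.0'}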
| bsd-3-clause |
muravjov/ansible | v2/ansible/playbook/role/__init__.py | 7 | 14635 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
import os
from hashlib import sha1
from types import NoneType
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins import module_loader
from ansible.utils.vars import combine_vars
__all__ = ['Role', 'ROLE_CACHE', 'hash_params']
# FIXME: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
# strategies (ansible/plugins/strategies/__init__.py)
def hash_params(params):
if not isinstance(params, dict):
return params
else:
s = set()
for k,v in params.iteritems():
if isinstance(v, dict):
s.update((k, hash_params(v)))
elif isinstance(v, list):
things = []
for item in v:
things.append(hash_params(item))
s.update((k, tuple(things)))
else:
s.update((k, v))
return frozenset(s)
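# Illustrative example, not part of the original source: because set.update() consumes
# the (key, value) tuple as an iterable, keys and hashed values land in the set as
# siblings rather than as pairs, e.g.
#   hash_params({'a': 1, 'b': {'c': 2}})
#   => frozenset(['a', 1, 'b', frozenset(['c', 2])])
# The result is still deterministic and hashable, which is all the ROLE_CACHE below needs.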
# The role cache is used to prevent re-loading roles, which
# may already exist. Keys into this cache are the SHA1 hash
# of the role definition (for dictionary definitions, this
# will be based on the repr() of the dictionary object)
ROLE_CACHE = dict()
class Role(Base, Conditional, Taggable):
def __init__(self):
self._role_name = None
self._role_path = None
self._role_params = dict()
self._loader = None
self._metadata = None
self._parents = []
self._dependencies = []
self._task_blocks = []
self._handler_blocks = []
self._default_vars = dict()
self._role_vars = dict()
self._had_task_run = False
self._completed = False
super(Role, self).__init__()
def __repr__(self):
return self.get_name()
def get_name(self):
return self._role_name
@staticmethod
def load(role_include, parent_role=None):
# FIXME: add back in the role caching support
try:
# The ROLE_CACHE is a dictionary of role names, with each entry
# containing another dictionary corresponding to a set of parameters
# specified for a role as the key and the Role() object itself.
# We use frozenset to make the dictionary hashable.
#hashed_params = frozenset(role_include.get_role_params().iteritems())
hashed_params = hash_params(role_include.get_role_params())
if role_include.role in ROLE_CACHE:
for (entry, role_obj) in ROLE_CACHE[role_include.role].iteritems():
if hashed_params == entry:
if parent_role:
role_obj.add_parent(parent_role)
return role_obj
r = Role()
r._load_role_data(role_include, parent_role=parent_role)
if role_include.role not in ROLE_CACHE:
ROLE_CACHE[role_include.role] = dict()
ROLE_CACHE[role_include.role][hashed_params] = r
return r
except RuntimeError:
# FIXME: needs a better way to access the ds in the role include
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles", obj=role_include._ds)
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
self._role_path = role_include.get_role_path()
self._role_params = role_include.get_role_params()
self._variable_manager = role_include.get_variable_manager()
self._loader = role_include.get_loader()
if parent_role:
self.add_parent(parent_role)
current_when = getattr(self, 'when')[:]
current_when.extend(role_include.when)
setattr(self, 'when', current_when)
current_tags = getattr(self, 'tags')[:]
current_tags.extend(role_include.tags)
setattr(self, 'tags', current_tags)
# save the current base directory for the loader and set it to the current role path
#cur_basedir = self._loader.get_basedir()
#self._loader.set_basedir(self._role_path)
# load the role's files, if they exist
library = os.path.join(self._role_path, 'library')
if os.path.isdir(library):
module_loader.add_directory(library)
metadata = self._load_role_yaml('meta')
if metadata:
self._metadata = RoleMetadata.load(metadata, owner=self, loader=self._loader)
self._dependencies = self._load_dependencies()
task_data = self._load_role_yaml('tasks')
if task_data:
self._task_blocks = load_list_of_blocks(task_data, role=self, loader=self._loader)
handler_data = self._load_role_yaml('handlers')
if handler_data:
self._handler_blocks = load_list_of_blocks(handler_data, role=self, loader=self._loader)
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars')
if not isinstance(self._role_vars, (dict, NoneType)):
raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds)
elif self._role_vars is None:
self._role_vars = dict()
self._default_vars = self._load_role_yaml('defaults')
if not isinstance(self._default_vars, (dict, NoneType)):
raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds)
elif self._default_vars is None:
self._default_vars = dict()
# and finally restore the previous base directory
#self._loader.set_basedir(cur_basedir)
def _load_role_yaml(self, subdir):
file_path = os.path.join(self._role_path, subdir)
if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
main_file = self._resolve_main(file_path)
if self._loader.path_exists(main_file):
return self._loader.load_from_file(main_file)
return None
def _resolve_main(self, basepath):
''' flexibly handle variations in main filenames '''
possible_mains = (
os.path.join(basepath, 'main.yml'),
os.path.join(basepath, 'main.yaml'),
os.path.join(basepath, 'main.json'),
os.path.join(basepath, 'main'),
)
if sum([self._loader.is_file(x) for x in possible_mains]) > 1:
raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
else:
for m in possible_mains:
if self._loader.is_file(m):
return m # exactly one main file
return possible_mains[0] # zero mains (we still need to return something)
def _load_dependencies(self):
'''
Recursively loads role dependencies from the metadata list of
dependencies, if it exists
'''
deps = []
if self._metadata:
for role_include in self._metadata.dependencies:
r = Role.load(role_include, parent_role=self)
deps.append(r)
return deps
#------------------------------------------------------------------------------
# other functions
def add_parent(self, parent_role):
''' adds a role to the list of this roles parents '''
assert isinstance(parent_role, Role)
if parent_role not in self._parents:
self._parents.append(parent_role)
def get_parents(self):
return self._parents
def get_default_vars(self):
# FIXME: get these from dependent roles too
default_vars = dict()
for dep in self.get_all_dependencies():
default_vars = combine_vars(default_vars, dep.get_default_vars())
default_vars = combine_vars(default_vars, self._default_vars)
return default_vars
def get_inherited_vars(self):
inherited_vars = dict()
for parent in self._parents:
inherited_vars = combine_vars(inherited_vars, parent.get_inherited_vars())
inherited_vars = combine_vars(inherited_vars, parent._role_vars)
inherited_vars = combine_vars(inherited_vars, parent._role_params)
return inherited_vars
def get_vars(self):
all_vars = self.get_inherited_vars()
for dep in self.get_all_dependencies():
all_vars = combine_vars(all_vars, dep.get_vars())
all_vars = combine_vars(all_vars, self._role_vars)
all_vars = combine_vars(all_vars, self._role_params)
return all_vars
def get_direct_dependencies(self):
return self._dependencies[:]
def get_all_dependencies(self):
'''
Returns a list of all deps, built recursively from all child dependencies,
in the proper order in which they should be executed or evaluated.
'''
child_deps = []
for dep in self.get_direct_dependencies():
for child_dep in dep.get_all_dependencies():
child_deps.append(child_dep)
child_deps.append(dep)
return child_deps
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self):
return self._handler_blocks[:]
def has_run(self):
'''
Returns true if this role has been iterated over completely and
at least one task was run
'''
return self._had_task_run and self._completed
def compile(self, dep_chain=[]):
'''
Returns the task list for this role, which is created by first
recursively compiling the tasks for all direct dependencies, and
then adding on the tasks for this role.
The role compile() also remembers and saves the dependency chain
with each task, so tasks know by which route they were found, and
can correctly take their parent's tags/conditionals into account.
'''
block_list = []
# update the dependency chain here
new_dep_chain = dep_chain + [self]
deps = self.get_direct_dependencies()
for dep in deps:
dep_blocks = dep.compile(dep_chain=new_dep_chain)
for dep_block in dep_blocks:
# since we're modifying the task, and need it to be unique,
# we make a copy of it here and assign the dependency chain
# to the copy, then append the copy to the task list.
new_dep_block = dep_block.copy()
new_dep_block._dep_chain = new_dep_chain
block_list.append(new_dep_block)
block_list.extend(self._task_blocks)
return block_list
def serialize(self, include_deps=True):
res = super(Role, self).serialize()
res['_role_name'] = self._role_name
res['_role_path'] = self._role_path
res['_role_vars'] = self._role_vars
res['_role_params'] = self._role_params
res['_default_vars'] = self._default_vars
res['_had_task_run'] = self._had_task_run
res['_completed'] = self._completed
if self._metadata:
res['_metadata'] = self._metadata.serialize()
if include_deps:
deps = []
for role in self.get_direct_dependencies():
deps.append(role.serialize())
res['_dependencies'] = deps
parents = []
for parent in self._parents:
parents.append(parent.serialize(include_deps=False))
res['_parents'] = parents
return res
def deserialize(self, data, include_deps=True):
self._role_name = data.get('_role_name', '')
self._role_path = data.get('_role_path', '')
self._role_vars = data.get('_role_vars', dict())
self._role_params = data.get('_role_params', dict())
self._default_vars = data.get('_default_vars', dict())
self._had_task_run = data.get('_had_task_run', False)
self._completed = data.get('_completed', False)
if include_deps:
deps = []
for dep in data.get('_dependencies', []):
r = Role()
r.deserialize(dep)
deps.append(r)
setattr(self, '_dependencies', deps)
parent_data = data.get('_parents', [])
parents = []
for parent in parent_data:
r = Role()
r.deserialize(parent, include_deps=False)
parents.append(r)
setattr(self, '_parents', parents)
metadata_data = data.get('_metadata')
if metadata_data:
m = RoleMetadata()
m.deserialize(metadata_data)
self._metadata = m
super(Role, self).deserialize(data)
def set_loader(self, loader):
self._loader = loader
for parent in self._parents:
parent.set_loader(loader)
for dep in self.get_direct_dependencies():
dep.set_loader(loader)
| gpl-3.0 |
ztane/zsos | userland/lib/python2.5/distutils/mwerkscompiler.py | 87 | 10325 | """distutils.mwerkscompiler
Contains MWerksCompiler, an implementation of the abstract CCompiler class
for MetroWerks CodeWarrior on the Macintosh. Needs work to support CW on
Windows."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: mwerkscompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os, string
from types import *
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
import distutils.util
import distutils.dir_util
from distutils import log
import mkcwproject
class MWerksCompiler (CCompiler) :
"""Concrete class that implements an interface to MetroWerks CodeWarrior,
as defined by the CCompiler abstract class."""
compiler_type = 'mwerks'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.r']
_exp_extension = '.exp'
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions)
res_extension = '.rsrc'
obj_extension = '.obj' # Not used, really
static_lib_extension = '.lib'
shared_lib_extension = '.slb'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = ''
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
def compile (self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
(output_dir, macros, include_dirs) = \
self._fix_compile_args (output_dir, macros, include_dirs)
self.__sources = sources
self.__macros = macros
self.__include_dirs = include_dirs
# Don't need extra_preargs and extra_postargs for CW
return []
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# First fixup.
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
# First examine a couple of options for things that aren't implemented yet
if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
if runtime_library_dirs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if extra_preargs or extra_postargs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if len(export_symbols) != 1:
raise DistutilsPlatformError, 'Need exactly one export symbol'
# Next there are various things for which we need absolute pathnames.
# This is because we (usually) create the project in a subdirectory of
# where we are now, and keeping the paths relative is too much work right
# now.
sources = map(self._filename_to_abs, self.__sources)
include_dirs = map(self._filename_to_abs, self.__include_dirs)
if objects:
objects = map(self._filename_to_abs, objects)
else:
objects = []
if build_temp:
build_temp = self._filename_to_abs(build_temp)
else:
build_temp = os.getcwd()
if output_dir:
output_filename = os.path.join(output_dir, output_filename)
# The output filename needs special handling: splitting it into dir and
# filename part. Actually I'm not sure this is really needed, but it
# can't hurt.
output_filename = self._filename_to_abs(output_filename)
output_dir, output_filename = os.path.split(output_filename)
# Now we need the short names of a couple of things for putting them
# into the project.
if output_filename[-8:] == '.ppc.slb':
basename = output_filename[:-8]
elif output_filename[-11:] == '.carbon.slb':
basename = output_filename[:-11]
else:
basename = os.path.splitext(output_filename)[0]
projectname = basename + '.mcp'
targetname = basename
xmlname = basename + '.xml'
exportname = basename + '.mcp.exp'
prefixname = 'mwerks_%s_config.h'%basename
# Create the directories we need
distutils.dir_util.mkpath(build_temp, dry_run=self.dry_run)
distutils.dir_util.mkpath(output_dir, dry_run=self.dry_run)
# And on to filling in the parameters for the project builder
settings = {}
settings['mac_exportname'] = exportname
settings['mac_outputdir'] = output_dir
settings['mac_dllname'] = output_filename
settings['mac_targetname'] = targetname
settings['sysprefix'] = sys.prefix
settings['mac_sysprefixtype'] = 'Absolute'
sourcefilenames = []
sourcefiledirs = []
for filename in sources + objects:
dirname, filename = os.path.split(filename)
sourcefilenames.append(filename)
if not dirname in sourcefiledirs:
sourcefiledirs.append(dirname)
settings['sources'] = sourcefilenames
settings['libraries'] = libraries
settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
if self.dry_run:
print 'CALLING LINKER IN', os.getcwd()
for key, value in settings.items():
print '%20.20s %s'%(key, value)
return
# Build the export file
exportfilename = os.path.join(build_temp, exportname)
log.debug("\tCreate export file %s", exportfilename)
fp = open(exportfilename, 'w')
fp.write('%s\n'%export_symbols[0])
fp.close()
# Generate the prefix file, if needed, and put it in the settings
if self.__macros:
prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
fp = open(prefixfilename, 'w')
fp.write('#include "mwerks_shcarbon_config.h"\n')
for name, value in self.__macros:
if value is None:
fp.write('#define %s\n'%name)
else:
fp.write('#define %s %s\n'%(name, value))
fp.close()
settings['prefixname'] = prefixname
# Build the XML file. We need the full pathname (only lateron, really)
# because we pass this pathname to CodeWarrior in an AppleEvent, and CW
# doesn't have a clue about our working directory.
xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
log.debug("\tCreate XML file %s", xmlfilename)
xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
xmlbuilder.generate()
xmldata = settings['tmp_projectxmldata']
fp = open(xmlfilename, 'w')
fp.write(xmldata)
fp.close()
# Generate the project. Again a full pathname.
projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
log.debug('\tCreate project file %s', projectfilename)
mkcwproject.makeproject(xmlfilename, projectfilename)
# And build it
log.debug('\tBuild project')
mkcwproject.buildproject(projectfilename)
def _filename_to_abs(self, filename):
# Some filenames seem to be unix-like. Convert to Mac names.
## if '/' in filename and ':' in filename:
## raise DistutilsPlatformError, 'Filename may be Unix or Mac style: %s'%filename
## if '/' in filename:
## filename = macurl2path(filename)
filename = distutils.util.convert_path(filename)
if not os.path.isabs(filename):
curdir = os.getcwd()
filename = os.path.join(curdir, filename)
# Finally remove .. components
components = string.split(filename, ':')
for i in range(1, len(components)):
if components[i] == '..':
components[i] = ''
return string.join(components, ':')
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
return # XXXX Not correct...
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
# Nothing needed or Mwerks/Mac.
return
def library_option (self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
return
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
return 0
| gpl-3.0 |
ficristo/brackets-shell | gyp/pylib/gyp/generator/xcode.py | 1363 | 58344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
generator_filelist_paths = None
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
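# Illustrative example, not part of the original generator:
#   CreateXCConfigurationList(['Debug', 'Release'])
# returns an XCConfigurationList holding one XCBuildConfiguration per name, with
# 'defaultConfigurationName' set to 'Debug' (the first name in the list).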
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
    # Collect a list of all of the build configuration names used by the
    # various targets in the file. It is strongly advised that every target
    # in a project (even across multiple project files) use the same set of
    # configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
    # the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
    # targets is the full list of targets in the project.
targets = []
    # does the project define its own "all" target?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True;
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator
        # flag 'xcode_serialize_all_test_runs' controls whether all tests
        # are forced to run serially. It defaults to True. To get serial
        # runs, this little bit of python does the same as the linux flock
        # utility to make sure only one test runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
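        # Worked example (comment only): with a typical umask of 022 (octal),
        # 0666 & ~umask is 0644, i.e. rw-r--r--, matching what a plain
        # open()/write() would have produced under that umask.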
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if matches == None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
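# Illustrative behavior (comment only; values are made up):
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', {'INPUT_FILE_BASE': 'two'})
# returns 'two.cc', while '$(OTHER).cc' would be returned unchanged because
# OTHER is not a key in the expansions dict.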
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
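# Illustrative behavior (comment only): passing the define NAME="a b" through
# EscapeXcodeDefine produces NAME=\"a\ b\", i.e. each quote, space and
# backslash gains a protecting backslash, so the value survives Xcode's
# splitting of GCC_PREPROCESSOR_DEFINITIONS.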
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
toplevel = params['options'].toplevel_dir
if params.get('flavor') == 'ninja':
generator_dir = os.path.relpath(params['options'].generator_output or '.')
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
output_dir = os.path.normpath(os.path.join(generator_dir, output_dir))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles-xcode-ninja'))
else:
output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
upgrade_check_project_version = \
generator_flags.get('xcode_upgrade_check_project_version', None)
# Format upgrade_check_project_version with leading zeros as needed.
if upgrade_check_project_version:
upgrade_check_project_version = str(upgrade_check_project_version)
while len(upgrade_check_project_version) < 4:
upgrade_check_project_version = '0' + upgrade_check_project_version
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
# Set project-level attributes from multiple options
project_attributes = {};
if parallel_builds:
project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
if upgrade_check_project_version:
project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
project_attributes['LastTestingUpgradeCheck'] = \
upgrade_check_project_version
project_attributes['LastSwiftUpdateCheck'] = \
upgrade_check_project_version
pbxp.SetProperty('attributes', project_attributes)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle':
'com.apple.product-type.application.watchapp',
'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names);
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
    # Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    #          variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
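        # Illustrative shape of the generated makefile (comment only), assuming
        # rule sources one.ext and two.ext that each produce a single .cc;
        # <tab> stands for a literal tab character:
        #   all: \
        #       one.cc \
        #       two.cc
        #   one.cc \
        #     : \
        #       one.ext
        #   <tab>@mkdir -p "<output dir>"
        #   <tab><rule action>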
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
          # Were it not for that, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign];
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit |
Venturi/oldcms | env/lib/python2.7/site-packages/django/views/generic/base.py | 82 | 8771 | from __future__ import unicode_literals
import logging
import warnings
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch, reverse
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.decorators import classonlymethod
from django.utils.deprecation import RemovedInDjango19Warning
_sentinel = object()
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
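# A minimal override sketch (comment only; ArticleListView and Article are
# hypothetical names, not part of this module):
#   class ArticleListView(TemplateView):
#       def get_context_data(self, **kwargs):
#           context = super(ArticleListView, self).get_context_data(**kwargs)
#           context['articles'] = Article.objects.all()
#           return context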
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
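# Illustrative URLconf usage (comment only; GreetingView is hypothetical and
# must already define a 'greeting' class attribute for the keyword check in
# as_view() to pass):
#   url(r'^hello/$', GreetingView.as_view(greeting='hi'))
# Each request then runs the view() closure above, which instantiates the
# class, aliases head to get when needed, and dispatches on request.method.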
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
template_engine = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = _sentinel
url = None
pattern_name = None
query_string = False
def __init__(self, *args, **kwargs):
if 'permanent' not in kwargs and self.permanent is _sentinel:
warnings.warn(
"Default value of 'RedirectView.permanent' will change "
"from True to False in Django 1.9. Set an explicit value "
"to silence this warning.",
RemovedInDjango19Warning,
stacklevel=2
)
self.permanent = True
super(RedirectView, self).__init__(*args, **kwargs)
@classonlymethod
def as_view(cls, **initkwargs):
if 'permanent' not in initkwargs and cls.permanent is _sentinel:
warnings.warn(
"Default value of 'RedirectView.permanent' will change "
"from True to False in Django 1.9. Set an explicit value "
"to silence this warning.",
RemovedInDjango19Warning,
stacklevel=2
)
initkwargs['permanent'] = True
return super(RedirectView, cls).as_view(**initkwargs)
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', request.path,
extra={
'status_code': 410,
'request': request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
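# Illustrative URLconf usage (comment only; 'item-detail' is a hypothetical
# URL pattern name):
#   url(r'^go/(?P<pk>\d+)/$',
#       RedirectView.as_view(pattern_name='item-detail', permanent=False))
# get_redirect_url() reverses 'item-detail' with the captured pk, and with
# query_string=True the original QUERY_STRING would be appended as well.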
| apache-2.0 |
andmos/ansible | lib/ansible/modules/windows/win_rds_rap.py | 10 | 2933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Subileau (@ksubileau)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_rds_rap
short_description: Manage Resource Authorization Policies (RAP) on a Remote Desktop Gateway server
description:
- Creates, removes and configures a Remote Desktop resource authorization policy (RD RAP).
- A RD RAP allows you to specify the network resources (computers) that users can connect
to remotely through a Remote Desktop Gateway server.
version_added: "2.8"
author:
- Kevin Subileau (@ksubileau)
options:
name:
description:
- Name of the resource authorization policy.
required: yes
state:
description:
- The state of resource authorization policy.
- If C(absent) will ensure the policy is removed.
- If C(present) will ensure the policy is configured and exists.
- If C(enabled) will ensure the policy is configured, exists and enabled.
- If C(disabled) will ensure the policy is configured, exists, but disabled.
choices: [ absent, present, enabled, disabled ]
default: present
description:
description:
      - Optional description of the resource authorization policy.
user_groups:
description:
- List of user groups that are associated with this resource authorization policy (RAP).
A user must belong to one of these groups to access the RD Gateway server.
- Required when a new RAP is created.
type: list
allowed_ports:
description:
- List of port numbers through which connections are allowed for this policy.
- To allow connections through any port, specify 'any'.
type: list
computer_group_type:
description:
- 'The computer group type:'
- 'C(rdg_group): RD Gateway-managed group'
- 'C(ad_network_resource_group): Active Directory Domain Services network resource group'
- 'C(allow_any): Allow users to connect to any network resource.'
choices: [ rdg_group, ad_network_resource_group, allow_any ]
computer_group:
description:
- The computer group name that is associated with this resource authorization policy (RAP).
- This is required when I(computer_group_type) is C(rdg_group) or C(ad_network_resource_group).
requirements:
- Windows Server 2008R2 (6.1) or higher.
- The Windows Feature "RDS-Gateway" must be enabled.
'''
EXAMPLES = r'''
- name: Create a new RDS RAP
win_rds_rap:
name: My RAP
description: 'Allow all users to connect to any resource through ports 3389 and 3390'
user_groups:
- BUILTIN\users
computer_group_type: allow_any
allowed_ports:
- 3389
- 3390
state: enabled
'''
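# A further hedged example (comment only, intentionally kept outside the
# module's EXAMPLES block): removing the policy above could be expressed as
#   - name: Remove the RDS RAP
#     win_rds_rap:
#       name: My RAP
#       state: absent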
RETURN = r'''
'''
| gpl-3.0 |
aral/isvat | django/contrib/admin/helpers.py | 100 | 13631 | from __future__ import unicode_literals
from django import forms
from django.contrib.admin.util import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, six.string_types):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return next(iter(self.form))
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe('\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
else:
contents += ':'
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = classes and {'class': ' '.join(classes)} or {}
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
label = label_for_field(field, form._meta.model, model_admin)
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ != '<lambda>' and field.__name__ or ''
else:
class_name = field
self.field = {
'name': class_name,
'label': label,
'field': field,
'help_text': help_text_for_field(class_name, form._meta.model)
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{0}>{1}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
if isinstance(f.rel, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field:
continue
if field in self.readonly_fields:
yield {
'label': label_for_field(field, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False
}
else:
yield self.formset.form.base_fields[field]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
if original is not None:
self.original_content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and hasattr(original, 'get_absolute_url')
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def has_auto_field(self):
if self.form._meta.model._meta.has_auto_field:
return True
# Also search any parents for an auto field.
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 0
if self.has_auto_field():
num_of_fields += 1
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
if form.is_bound:
self.extend(list(six.itervalues(form.errors)))
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(list(six.itervalues(errors_in_inline_form)))
def normalize_fieldsets(fieldsets):
"""
Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data.
"""
result = []
for name, options in fieldsets:
result.append((name, normalize_dictionary(options)))
return result
def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
for key, value in data_dict.items():
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict
| mit |
tmuelle2/phantomjs | src/qt/qtbase/src/3rdparty/angle/src/libGLESv2/Float16ToFloat32.py | 329 | 1897 | # Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script generates a function that converts 16-bit precision floating
# point numbers to 32-bit.
# It is based on ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf.
def convertMantissa(i):
if i == 0:
return 0
elif i < 1024:
m = i << 13
e = 0
while not (m & 0x00800000):
e -= 0x00800000
m = m << 1
m &= ~0x00800000
e += 0x38800000
return m | e
else:
return 0x38000000 + ((i - 1024) << 13)
def convertExponent(i):
if i == 0:
return 0
elif i in range(1, 31):
return i << 23
elif i == 31:
return 0x47800000
elif i == 32:
return 0x80000000
elif i in range(33, 63):
return 0x80000000 + ((i - 32) << 23)
else:
return 0xC7800000
def convertOffset(i):
if i == 0 or i == 32:
return 0
else:
return 1024
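# Illustrative sanity check of the helpers above (it prints nothing and leaves
# the emitted tables unchanged): half-precision 1.0 has bit pattern 0x3C00 and
# must map to 0x3F800000, the IEEE-754 single-precision bit pattern of 1.0f.
assert (convertMantissa(convertOffset(0x3C00 >> 10) + (0x3C00 & 0x3ff))
        + convertExponent(0x3C00 >> 10)) == 0x3F800000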
print """//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file is automatically generated.
namespace gl
{
"""
print "const static unsigned g_mantissa[2048] = {"
for i in range(0, 2048):
print " %#010x," % convertMantissa(i)
print "};\n"
print "const static unsigned g_exponent[64] = {"
for i in range(0, 64):
print " %#010x," % convertExponent(i)
print "};\n"
print "const static unsigned g_offset[64] = {"
for i in range(0, 64):
print " %#010x," % convertOffset(i)
print "};\n"
print """float float16ToFloat32(unsigned short h)
{
unsigned i32 = g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10];
return *(float*) &i32;
}
}
"""
| bsd-3-clause |
ycaihua/kbengine | kbe/src/lib/python/Lib/test/test_userdict.py | 80 | 6575 | # Check every path through every method of UserDict
from test import support, mapping_tests
import collections
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(mapping_tests.TestHashMappingProtocol):
type2test = collections.UserDict
def test_all(self):
# Test constructors
u = collections.UserDict()
u0 = collections.UserDict(d0)
u1 = collections.UserDict(d1)
u2 = collections.UserDict(d2)
uu = collections.UserDict(u)
uu0 = collections.UserDict(u0)
uu1 = collections.UserDict(u1)
uu2 = collections.UserDict(u2)
# keyword arg constructor
self.assertEqual(collections.UserDict(one=1, two=2), d2)
# item sequence constructor
self.assertEqual(collections.UserDict([('one',1), ('two',2)]), d2)
self.assertEqual(collections.UserDict(dict=[('one',1), ('two',2)]), d2)
# both together
self.assertEqual(collections.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
# alternate constructor
self.assertEqual(collections.UserDict.fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict().fromkeys('one two'.split()), d4)
self.assertEqual(collections.UserDict.fromkeys('one two'.split(), 1), d5)
self.assertEqual(collections.UserDict().fromkeys('one two'.split(), 1), d5)
self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
self.assertIsInstance(u1.fromkeys('one two'.split()), collections.UserDict)
self.assertIsInstance(u2.fromkeys('one two'.split()), collections.UserDict)
# Test __repr__
self.assertEqual(str(u0), str(d0))
self.assertEqual(repr(u1), repr(d1))
self.assertIn(repr(u2), ("{'one': 1, 'two': 2}",
"{'two': 2, 'one': 1}"))
# Test rich comparison and __len__
all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
self.assertEqual(a == b, len(a) == len(b))
# Test __getitem__
self.assertEqual(u2["one"], 1)
self.assertRaises(KeyError, u1.__getitem__, "two")
# Test __setitem__
u3 = collections.UserDict(u2)
u3["two"] = 2
u3["three"] = 3
# Test __delitem__
del u3["three"]
self.assertRaises(KeyError, u3.__delitem__, "three")
# Test clear
u3.clear()
self.assertEqual(u3, {})
# Test copy()
u2a = u2.copy()
self.assertEqual(u2a, u2)
u2b = collections.UserDict(x=42, y=23)
u2c = u2b.copy() # making a copy of a UserDict is special cased
self.assertEqual(u2b, u2c)
class MyUserDict(collections.UserDict):
def display(self): print(self)
m2 = MyUserDict(u2)
m2a = m2.copy()
self.assertEqual(m2a, m2)
# SF bug #476616 -- copy() of UserDict subclass shared data
m2['foo'] = 'bar'
self.assertNotEqual(m2a, m2)
# Test keys, items, values
self.assertEqual(sorted(u2.keys()), sorted(d2.keys()))
self.assertEqual(sorted(u2.items()), sorted(d2.items()))
self.assertEqual(sorted(u2.values()), sorted(d2.values()))
# Test "in".
for i in u2.keys():
self.assertIn(i, u2)
self.assertEqual(i in u1, i in d1)
self.assertEqual(i in u0, i in d0)
# Test update
t = collections.UserDict()
t.update(u2)
self.assertEqual(t, u2)
# Test get
for i in u2.keys():
self.assertEqual(u2.get(i), u2[i])
self.assertEqual(u1.get(i), d1.get(i))
self.assertEqual(u0.get(i), d0.get(i))
# Test "in" iteration.
for i in range(20):
u2[i] = str(i)
ikeys = []
for k in u2:
ikeys.append(k)
keys = u2.keys()
self.assertEqual(set(ikeys), set(keys))
# Test setdefault
t = collections.UserDict()
self.assertEqual(t.setdefault("x", 42), 42)
self.assertIn("x", t)
self.assertEqual(t.setdefault("x", 23), 42)
# Test pop
t = collections.UserDict(x=42)
self.assertEqual(t.pop("x"), 42)
self.assertRaises(KeyError, t.pop, "x")
self.assertEqual(t.pop("x", 1), 1)
t["x"] = 42
self.assertEqual(t.pop("x", 1), 42)
# Test popitem
t = collections.UserDict(x=42)
self.assertEqual(t.popitem(), ("x", 42))
self.assertRaises(KeyError, t.popitem)
def test_missing(self):
# Make sure UserDict doesn't have a __missing__ method
self.assertEqual(hasattr(collections.UserDict, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(collections.UserDict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(collections.UserDict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(collections.UserDict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
collections.UserDict.__init__(self)
f = F()
try:
f[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(collections.UserDict):
pass
g = G()
try:
g[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_main():
support.run_unittest(
UserDictTest,
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
EvolutionClip/pyload | module/plugins/hoster/FilepostCom.py | 1 | 4945 | # -*- coding: utf-8 -*-
import re
from time import time
from module.common.json_layer import json_loads
from module.plugins.internal.CaptchaService import ReCaptcha
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class FilepostCom(SimpleHoster):
__name__ = "FilepostCom"
__type__ = "hoster"
__version__ = "0.32"
__pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp\.io)/(?P<ID>[^/]+)'
__description__ = """Filepost.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[\d.,]+) (?P<U>[\w^_]+)</a>\' class="inp_text"/>'
OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'
RECAPTCHA_PATTERN = r'Captcha.init\({\s*key:\s*\'(.+?)\''
FLP_TOKEN_PATTERN = r'set_store_options\({token: \'(.+?)\''
def handleFree(self, pyfile):
m = re.search(self.FLP_TOKEN_PATTERN, self.html)
if m is None:
self.error(_("Token"))
flp_token = m.group(1)
m = re.search(self.RECAPTCHA_PATTERN, self.html)
if m is None:
self.error(_("Captcha key"))
captcha_key = m.group(1)
# Get wait time
get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
post_dict = {'action': 'set_download', 'token': flp_token, 'code': self.info['pattern']['ID']}
wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
if wait_time > 0:
self.wait(wait_time)
post_dict = {"token": flp_token, "code": self.info['pattern']['ID'], "file_pass": ''}
if 'var is_pass_exists = true;' in self.html:
# Solve password
password = self.getPassword()
if password:
                self.logInfo(_("Password protected link, trying ") + password)
                get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
                post_dict['file_pass'] = password
                download_url = self.getJsonResponse(get_dict, post_dict, 'link')
                if not download_url:
self.fail(_("Incorrect password"))
else:
self.fail(_("No password found"))
else:
# Solve recaptcha
recaptcha = ReCaptcha(self)
for i in xrange(5):
get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
if i:
post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field'] = recaptcha.challenge(
captcha_key)
self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
download_url = self.getJsonResponse(get_dict, post_dict, 'link')
if download_url:
if i:
self.correctCaptcha()
break
elif i:
self.invalidCaptcha()
else:
self.fail(_("Invalid captcha"))
# Download
self.download(download_url)
def getJsonResponse(self, get_dict, post_dict, field):
res = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
self.logDebug(res)
if not 'js' in res:
self.error(_("JSON %s 1") % field)
        # I changed js_answer to res['js'] since js_answer is nowhere set.
        # I don't know the JSON-HTTP specs in detail, but the previous author
        # accessed res['js']['error'] as well as js_answer['error'];
        # see the two lines commented out with "# ~?".
if 'error' in res['js']:
if res['js']['error'] == 'download_delay':
self.retry(wait_time=res['js']['params']['next_download'])
# ~? self.retry(wait_time=js_answer['params']['next_download'])
elif ('Wrong file password' in res['js']['error']
or 'You entered a wrong CAPTCHA code' in res['js']['error']
or 'CAPTCHA Code nicht korrekt' in res['js']['error']):
return None
elif 'CAPTCHA' in res['js']['error']:
self.logDebug("Error response is unknown, but mentions CAPTCHA")
return None
else:
self.fail(res['js']['error'])
if not 'answer' in res['js'] or not field in res['js']['answer']:
self.error(_("JSON %s 2") % field)
return res['js']['answer'][field]
getInfo = create_getInfo(FilepostCom)
| gpl-3.0 |
jjmleiro/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/PublicKey/__init__.py | 124 | 1876 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Public-key encryption and signature algorithms.
Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.
======================== =============================================
Module Description
======================== =============================================
Crypto.PublicKey.DSA Digital Signature Algorithm (Signature only)
Crypto.PublicKey.ElGamal (Signing and encryption)
Crypto.PublicKey.RSA (Signing, encryption, and blinding)
======================== =============================================
:undocumented: _DSA, _RSA, _fastmath, _slowmath, pubkey
"""
__all__ = ['RSA', 'DSA', 'ElGamal']
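# Illustrative usage sketch (standard PyCrypto 2.x RSA API; the 2048-bit key
# size is an arbitrary choice for the example):
#
#   from Crypto.PublicKey import RSA
#   key = RSA.generate(2048)         # private key (keep secret)
#   pub = key.publickey()            # public half, safe to distribute
#   pem = pub.exportKey()            # PEM-encoded public key
#   restored = RSA.importKey(pem)    # parse it back into a key object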
__revision__ = "$Id$"
| apache-2.0 |
xeroc/python-bitshares | bitsharesbase/operations.py | 1 | 43006 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from graphenebase.types import (
Array,
Bool,
Bytes,
Fixed_array,
Id,
Int16,
Int64,
Map,
Optional,
PointInTime,
Set,
Static_variant,
String,
Uint8,
Uint16,
Uint32,
Uint64,
Varint32,
Void,
Ripemd160,
Sha1,
Sha256,
Hash160,
)
from .account import PublicKey
from .objects import (
AccountCreateExtensions,
AccountOptions,
Asset,
AssetOptions,
BitAssetOptions,
CallOrderExtension,
GrapheneObject,
Memo,
ObjectId,
Operation,
Permission,
Price,
PriceFeed,
Worker_initializer,
isArgsThisClass,
AssertPredicate,
)
from .operationids import operations
default_prefix = "BTS"
class_idmap = {}
class_namemap = {}
def fill_classmaps():
for name, ind in operations.items():
classname = name[0:1].upper() + name[1:]
class_namemap[classname] = ind
try:
class_idmap[ind] = globals()[classname]
except Exception:
continue
def getOperationClassForId(op_id):
"""Convert an operation id into the corresponding class."""
return class_idmap[op_id] if op_id in class_idmap else None
def getOperationIdForClass(name):
"""Convert an operation classname into the corresponding id."""
return class_namemap[name] if name in class_namemap else None
def getOperationNameForId(i):
"""Convert an operation id into the corresponding string."""
for key in operations:
if int(operations[key]) is int(i):
return key
return "Unknown Operation ID %d" % i
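# Illustrative usage of the helpers above (assumes the ids registered in
# .operationids; on Graphene-based chains "transfer" is conventionally id 0):
#
#   getOperationNameForId(0)            # -> "transfer"
#   getOperationClassForId(0)           # -> Transfer
#   getOperationIdForClass("Transfer")  # -> 0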
class Transfer(GrapheneObject):
def __init__(self, *args, **kwargs):
# Allow for overwrite of prefix
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
prefix = kwargs.get("prefix", default_prefix)
if "memo" in kwargs and kwargs["memo"]:
if isinstance(kwargs["memo"], dict):
kwargs["memo"]["prefix"] = prefix
memo = Optional(Memo(**kwargs["memo"]))
else:
memo = Optional(Memo(kwargs["memo"]))
else:
memo = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("from", ObjectId(kwargs["from"], "account")),
("to", ObjectId(kwargs["to"], "account")),
("amount", Asset(kwargs["amount"])),
("memo", memo),
("extensions", Set([])),
]
)
)
class Asset_publish_feed(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("publisher", ObjectId(kwargs["publisher"], "account")),
("asset_id", ObjectId(kwargs["asset_id"], "asset")),
("feed", PriceFeed(kwargs["feed"])),
("extensions", Set([])),
]
)
)
class Asset_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if kwargs.get("bitasset_opts"):
bitasset_opts = Optional(BitAssetOptions(kwargs["bitasset_opts"]))
else:
bitasset_opts = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
("symbol", String(kwargs["symbol"])),
("precision", Uint8(kwargs["precision"])),
("common_options", AssetOptions(kwargs["common_options"])),
("bitasset_opts", bitasset_opts),
(
"is_prediction_market",
Bool(bool(kwargs["is_prediction_market"])),
),
("extensions", Set([])),
]
)
)
class Asset_update(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if "new_issuer" in kwargs:
                raise ValueError(
                    "Cannot change asset_issuer with Asset_update any longer! (BSIP29)"
                )
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
(
"asset_to_update",
ObjectId(kwargs["asset_to_update"], "asset"),
),
("new_issuer", Optional(None)),
("new_options", AssetOptions(kwargs["new_options"])),
("extensions", Set([])),
]
)
)
class Asset_update_bitasset(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
(
"asset_to_update",
ObjectId(kwargs["asset_to_update"], "asset"),
),
("new_options", BitAssetOptions(kwargs["new_options"])),
("extensions", Set([])),
]
)
)
class Asset_issue(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
prefix = kwargs.get("prefix", default_prefix)
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if "memo" in kwargs and kwargs["memo"]:
memo = Optional(Memo(prefix=prefix, **kwargs["memo"]))
else:
memo = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
("asset_to_issue", Asset(kwargs["asset_to_issue"])),
(
"issue_to_account",
ObjectId(kwargs["issue_to_account"], "account"),
),
("memo", memo),
("extensions", Set([])),
]
)
)
class Op_wrapper(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(OrderedDict([("op", Operation(kwargs["op"]))]))
class Proposal_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if "review_period_seconds" in kwargs:
review = Optional(Uint32(kwargs["review_period_seconds"]))
else:
review = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"fee_paying_account",
ObjectId(kwargs["fee_paying_account"], "account"),
),
("expiration_time", PointInTime(kwargs["expiration_time"])),
(
"proposed_ops",
Array([Op_wrapper(o) for o in kwargs["proposed_ops"]]),
),
("review_period_seconds", review),
("extensions", Set([])),
]
)
)
class Proposal_update(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
for o in [
"active_approvals_to_add",
"active_approvals_to_remove",
"owner_approvals_to_add",
"owner_approvals_to_remove",
"key_approvals_to_add",
"key_approvals_to_remove",
]:
if o not in kwargs:
kwargs[o] = []
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"fee_paying_account",
ObjectId(kwargs["fee_paying_account"], "account"),
),
("proposal", ObjectId(kwargs["proposal"], "proposal")),
(
"active_approvals_to_add",
Array(
[
ObjectId(o, "account")
for o in kwargs["active_approvals_to_add"]
]
),
),
(
"active_approvals_to_remove",
Array(
[
ObjectId(o, "account")
for o in kwargs["active_approvals_to_remove"]
]
),
),
(
"owner_approvals_to_add",
Array(
[
ObjectId(o, "account")
for o in kwargs["owner_approvals_to_add"]
]
),
),
(
"owner_approvals_to_remove",
Array(
[
ObjectId(o, "account")
for o in kwargs["owner_approvals_to_remove"]
]
),
),
(
"key_approvals_to_add",
Array(
[PublicKey(o) for o in kwargs["key_approvals_to_add"]]
),
),
(
"key_approvals_to_remove",
Array(
[
PublicKey(o)
for o in kwargs["key_approvals_to_remove"]
]
),
),
("extensions", Set([])),
]
)
)
class Limit_order_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("seller", ObjectId(kwargs["seller"], "account")),
("amount_to_sell", Asset(kwargs["amount_to_sell"])),
("min_to_receive", Asset(kwargs["min_to_receive"])),
("expiration", PointInTime(kwargs["expiration"])),
("fill_or_kill", Bool(kwargs["fill_or_kill"])),
("extensions", Set([])),
]
)
)
class Limit_order_cancel(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"fee_paying_account",
ObjectId(kwargs["fee_paying_account"], "account"),
),
("order", ObjectId(kwargs["order"], "limit_order")),
("extensions", Set([])),
]
)
)
class Call_order_update(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"funding_account",
ObjectId(kwargs["funding_account"], "account"),
),
("delta_collateral", Asset(kwargs["delta_collateral"])),
("delta_debt", Asset(kwargs["delta_debt"])),
("extensions", CallOrderExtension(kwargs["extensions"])),
]
)
)
class Asset_fund_fee_pool(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("from_account", ObjectId(kwargs["from_account"], "account")),
("asset_id", ObjectId(kwargs["asset_id"], "asset")),
("amount", Int64(kwargs["amount"])),
("extensions", Set([])),
]
)
)
class Asset_claim_fees(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
("amount_to_claim", Asset(kwargs["amount_to_claim"])),
("extensions", Set([])),
]
)
)
class Asset_claim_pool(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
("asset_id", ObjectId(kwargs["asset_id"], "asset")),
("amount_to_claim", Asset(kwargs["amount_to_claim"])),
("extensions", Set([])),
]
)
)
class Override_transfer(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if "memo" in kwargs:
memo = Optional(Memo(kwargs["memo"]))
else:
memo = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
("from", ObjectId(kwargs["from"], "account")),
("to", ObjectId(kwargs["to"], "account")),
("amount", Asset(kwargs["amount"])),
("memo", memo),
("extensions", Set([])),
]
)
)
class Account_create(GrapheneObject):
def __init__(self, *args, **kwargs):
# Allow for overwrite of prefix
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
prefix = kwargs.get("prefix", default_prefix)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("registrar", ObjectId(kwargs["registrar"], "account")),
("referrer", ObjectId(kwargs["referrer"], "account")),
("referrer_percent", Uint16(kwargs["referrer_percent"])),
("name", String(kwargs["name"])),
("owner", Permission(kwargs["owner"], prefix=prefix)),
("active", Permission(kwargs["active"], prefix=prefix)),
("options", AccountOptions(kwargs["options"], prefix=prefix)),
("extensions", AccountCreateExtensions(kwargs["extensions"])),
]
)
)
class Account_update(GrapheneObject):
def __init__(self, *args, **kwargs):
# Allow for overwrite of prefix
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
prefix = kwargs.get("prefix", default_prefix)
if "owner" in kwargs:
owner = Optional(Permission(kwargs["owner"], prefix=prefix))
else:
owner = Optional(None)
if "active" in kwargs:
active = Optional(Permission(kwargs["active"], prefix=prefix))
else:
active = Optional(None)
if "new_options" in kwargs:
options = Optional(AccountOptions(kwargs["new_options"], prefix=prefix))
else:
options = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("owner", owner),
("active", active),
("new_options", options),
("extensions", Set([])),
]
)
)
class Account_whitelist(GrapheneObject):
no_listing = 0 # < No opinion is specified about this account
white_listed = 1 # < This account is whitelisted, but not blacklisted
black_listed = 2 # < This account is blacklisted, but not whitelisted
white_and_black_listed = 3 # < This account is both whitelisted and blacklisted
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"authorizing_account",
ObjectId(kwargs["authorizing_account"], "account"),
),
(
"account_to_list",
ObjectId(kwargs["account_to_list"], "account"),
),
("new_listing", Uint8(kwargs["new_listing"])),
("extensions", Set([])),
]
)
)
class Vesting_balance_withdraw(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"vesting_balance",
ObjectId(kwargs["vesting_balance"], "vesting_balance"),
),
("owner", ObjectId(kwargs["owner"], "account")),
("amount", Asset(kwargs["amount"])),
]
)
)
class Account_upgrade(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"account_to_upgrade",
ObjectId(kwargs["account_to_upgrade"], "account"),
),
(
"upgrade_to_lifetime_member",
Bool(kwargs["upgrade_to_lifetime_member"]),
),
("extensions", Set([])),
]
)
)
class Witness_update(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if "new_url" in kwargs and kwargs["new_url"]:
new_url = Optional(String(kwargs["new_url"]))
else:
new_url = Optional(None)
if "new_signing_key" in kwargs and kwargs["new_signing_key"]:
new_signing_key = Optional(PublicKey(kwargs["new_signing_key"]))
else:
new_signing_key = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("witness", ObjectId(kwargs["witness"], "witness")),
(
"witness_account",
ObjectId(kwargs["witness_account"], "account"),
),
("new_url", new_url),
("new_signing_key", new_signing_key),
]
)
)
class Asset_update_feed_producers(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
kwargs["new_feed_producers"] = sorted(
kwargs["new_feed_producers"], key=lambda x: float(x.split(".")[2])
)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
(
"asset_to_update",
ObjectId(kwargs["asset_to_update"], "asset"),
),
(
"new_feed_producers",
Array(
[
ObjectId(o, "account")
for o in kwargs["new_feed_producers"]
]
),
),
("extensions", Set([])),
]
)
)
class Asset_reserve(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("payer", ObjectId(kwargs["payer"], "account")),
("amount_to_reserve", Asset(kwargs["amount_to_reserve"])),
("extensions", Set([])),
]
)
)
class Worker_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("owner", ObjectId(kwargs["owner"], "account")),
("work_begin_date", PointInTime(kwargs["work_begin_date"])),
("work_end_date", PointInTime(kwargs["work_end_date"])),
("daily_pay", Uint64(kwargs["daily_pay"])),
("name", String(kwargs["name"])),
("url", String(kwargs["url"])),
("initializer", Worker_initializer(kwargs["initializer"])),
]
)
)
class Withdraw_permission_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"withdraw_from_account",
ObjectId(kwargs["withdraw_from_account"], "account"),
),
(
"authorized_account",
ObjectId(kwargs["authorized_account"], "account"),
),
("withdrawal_limit", Asset(kwargs["withdrawal_limit"])),
(
"withdrawal_period_sec",
Uint32(kwargs["withdrawal_period_sec"]),
),
(
"periods_until_expiration",
Uint32(kwargs["periods_until_expiration"]),
),
("period_start_time", PointInTime(kwargs["period_start_time"])),
]
)
)
class Asset_global_settle(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
(
"asset_to_settle",
ObjectId(kwargs["asset_to_settle"], "asset"),
),
("settle_price", Price(kwargs["settle_price"])),
("extensions", Set([])),
]
)
)
class Committee_member_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"committee_member_account",
ObjectId(kwargs["committee_member_account"], "account"),
),
("url", String(kwargs["url"])),
]
)
)
class Custom(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("payer", ObjectId(kwargs["payer"], "account")),
(
"required_auths",
Array(
[
ObjectId(o, "account")
for o in kwargs["required_auths"]
]
),
),
("id", Uint16(kwargs["id"])),
("data", Bytes(kwargs["data"])),
]
)
)
class Bid_collateral(GrapheneObject):
def detail(self, *args, **kwargs):
# New pygraphene interface!
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("bidder", ObjectId(kwargs["bidder"], "account")),
("additional_collateral", Asset(kwargs["additional_collateral"])),
("debt_covered", Asset(kwargs["debt_covered"])),
("extensions", Set([])),
]
)
class Balance_claim(GrapheneObject):
def detail(self, *args, **kwargs):
# New pygraphene interface!
prefix = kwargs.pop("prefix", default_prefix)
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"deposit_to_account",
ObjectId(kwargs["deposit_to_account"], "account"),
),
("balance_to_claim", ObjectId(kwargs["balance_to_claim"], "balance")),
(
"balance_owner_key",
PublicKey(kwargs["balance_owner_key"], prefix=prefix),
),
("total_claimed", Asset(kwargs["total_claimed"])),
]
)
class Asset_settle(GrapheneObject):
def detail(self, *args, **kwargs):
# New pygraphene interface!
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("amount", Asset(kwargs["amount"])),
("extensions", Set([])),
]
)
class HtlcHash(Static_variant):
elements = [Ripemd160, Sha1, Sha256, Hash160]
def __init__(self, o):
id = o[0]
if len(self.elements) <= id:
raise Exception("Unknown HTLC Hashing method")
data = self.elements[id](o[1])
super().__init__(data, id)
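    # For example, HtlcHash([2, hash_hex]) selects Sha256 (index 2 in `elements`),
    # where hash_hex is a placeholder for the hex digest; an id beyond the list
    # raises the "Unknown HTLC Hashing method" exception above.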
class Htlc_create(GrapheneObject):
def detail(self, *args, **kwargs):
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("from", ObjectId(kwargs["from"], "account")),
("to", ObjectId(kwargs["to"], "account")),
("amount", Asset(kwargs["amount"])),
("preimage_hash", HtlcHash(kwargs["preimage_hash"])),
("preimage_size", Uint16(kwargs["preimage_size"])),
("claim_period_seconds", Uint32(kwargs["claim_period_seconds"])),
("extensions", Set([])),
]
)
class Htlc_redeem(GrapheneObject):
def detail(self, *args, **kwargs):
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("htlc_id", ObjectId(kwargs["htlc_id"], "htlc")),
("redeemer", ObjectId(kwargs["redeemer"], "account")),
("preimage", Bytes(kwargs["preimage"])),
("extensions", Set([])),
]
)
class Htlc_extend(GrapheneObject):
def detail(self, *args, **kwargs):
return OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("htlc_id", ObjectId(kwargs["htlc_id"], "htlc")),
("update_issuer", ObjectId(kwargs["update_issuer"], "account")),
("seconds_to_add", Uint32(kwargs["seconds_to_add"])),
("extensions", Set([])),
]
)
class Asset_update_issuer(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("issuer", ObjectId(kwargs["issuer"], "account")),
(
"asset_to_update",
ObjectId(kwargs["asset_to_update"], "asset"),
),
("new_issuer", ObjectId(kwargs["new_issuer"], "account")),
("extensions", Set([])),
]
)
)
class Assert(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
(
"fee_paying_account",
ObjectId(kwargs["fee_paying_account"], "account"),
),
(
"predicates",
Array([AssertPredicate(o) for o in kwargs["predicates"]]),
),
(
"required_auths",
Array(
[
ObjectId(o, "account")
for o in kwargs["required_auths"]
]
),
),
("extensions", Set([])),
]
)
)
ticket_type_strings = ['liquid', 'lock_180_days', 'lock_360_days', 'lock_720_days', 'lock_forever']
class Ticket_create_operation(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if isinstance(kwargs["target_type"], int):
target_type = Varint32(kwargs["target_type"])
else:
target_type = Varint32(ticket_type_strings.index(kwargs["target_type"]))
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("target_type", target_type),
("amount", Asset(kwargs["amount"])),
("extensions", Set([])),
]
)
)
class Ticket_update_operation(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
if isinstance(kwargs["target_type"], int):
target_type = Varint32(kwargs["target_type"])
else:
target_type = Varint32(ticket_type_strings.index(kwargs["target_type"]))
if kwargs.get("amount_for_new_target"):
amount_for_new_target = Optional(Asset(kwargs["amount_for_new_target"]))
else:
amount_for_new_target = Optional(None)
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("ticket", ObjectId(kwargs["ticket"], "ticket")),
("account", ObjectId(kwargs["account"], "account")),
("target_type", target_type),
("amount_for_new_target", amount_for_new_target),
("extensions", Set([])),
]
)
)
class Liquidity_pool_create(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("asset_a", ObjectId(kwargs["asset_a"], "asset")),
("asset_b", ObjectId(kwargs["asset_b"], "asset")),
("share_asset", ObjectId(kwargs["share_asset"], "asset")),
("taker_fee_percent", Uint16(kwargs["taker_fee_percent"])),
("withdrawal_fee_percent", Uint16(kwargs["withdrawal_fee_percent"])),
("extensions", Set([])),
]
)
)
class Liquidity_pool_delete(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("pool", ObjectId(kwargs["pool"], "liquidity_pool")),
("extensions", Set([])),
]
)
)
class Liquidity_pool_deposit(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("pool", ObjectId(kwargs["pool"], "liquidity_pool")),
("amount_a", Asset(kwargs["amount_a"])),
("amount_b", Asset(kwargs["amount_b"])),
("extensions", Set([])),
]
)
)
class Liquidity_pool_withdraw(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("pool", ObjectId(kwargs["pool"], "liquidity_pool")),
("share_amount", Asset(kwargs["share_amount"])),
("extensions", Set([])),
]
)
)
class Liquidity_pool_exchange(GrapheneObject):
def __init__(self, *args, **kwargs):
if isArgsThisClass(self, args):
self.data = args[0].data
else:
if len(args) == 1 and len(kwargs) == 0:
kwargs = args[0]
super().__init__(
OrderedDict(
[
("fee", Asset(kwargs["fee"])),
("account", ObjectId(kwargs["account"], "account")),
("pool", ObjectId(kwargs["pool"], "liquidity_pool")),
("amount_to_sell", Asset(kwargs["amount_to_sell"])),
("min_to_receive", Asset(kwargs["min_to_receive"])),
("extensions", Set([])),
]
)
)
fill_classmaps()
| mit |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/heating/launch.py | 1 | 12656 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.analysis.heating.launch Contains the DustHeatingContributionLauncher class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
# Import astronomical modules
from astropy.units import Unit
# Import the relevant PTS classes and modules
from .component import DustHeatingAnalysisComponent
from ....core.tools import filesystem as fs
from ....core.simulation.skifile import SkiFile
from ....core.launch.batchlauncher import BatchLauncher
from ....core.simulation.definition import SingleSimulationDefinition
from ....core.tools.logging import log
from ...basics.instruments import SimpleInstrument, FrameInstrument
from ...basics.projection import GalaxyProjection
# -----------------------------------------------------------------
class DustHeatingContributionLauncher(DustHeatingAnalysisComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(DustHeatingContributionLauncher, self).__init__(config)
# -- Attributes --
# The SKIRT batch launcher
self.launcher = BatchLauncher()
# The path to the directory with the best model parameters
self.best_path = None
# The ski file corresponding to the best model
self.ski = None
# The projection systems
self.projections = dict()
        # The instruments to be used for the simulations
self.instruments = dict()
# The ski files for the different contributions
self.ski_files = dict()
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new BestModelLauncher instance
launcher = cls()
# Set the modeling path
launcher.config.path = arguments.path
# Return the new instance
return launcher
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the ski file describing the best model
self.load_ski()
# 3. Load the projection systems
self.load_projections()
# 4. Create the instruments
self.create_instruments()
# 5. Create the ski files for the different contributors
self.create_ski_files()
# 6. Writing
self.write()
# 7. Launch the simulations
self.launch()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(DustHeatingContributionLauncher, self).setup()
# The path to the directory with the best model parameters
self.best_path = fs.join(self.fit_path, "best")
# Set options for the BatchLauncher
self.launcher.config.shared_input = True # The input directories for the different simulations are shared
#self.launcher.config.group_simulations = True # group multiple simulations into a single job
self.launcher.config.remotes = self.config.remotes # the remote hosts on which to run the simulations
self.launcher.config.logging.verbose = True
# -----------------------------------------------------------------
def load_ski(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the ski file for the best fitting model ...")
# Determine the path to the best model ski file
path = fs.join(self.best_path, self.galaxy_name + ".ski")
# Load the ski file
self.ski = SkiFile(path)
# -----------------------------------------------------------------
def load_projections(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the projection systems ...")
# Load the different projection systems
for name in ["earth", "faceon"]:
# Determine the path to the projection file
path = fs.join(self.components_path, name + ".proj")
# Load the projection
projection = GalaxyProjection.from_file(path)
# Add the projection to the dictionary
self.projections[name] = projection
# -----------------------------------------------------------------
def create_instruments(self):
"""
This function ...
:return:
"""
# Inform the user
        log.info("Creating the instruments ...")
# Create a SimpleInstrument for the 'earth' projection
self.instruments["earth"] = SimpleInstrument.from_projection(self.projections["earth"])
# Create a FrameInstrument for the 'faceon' projection
self.instruments["faceon"] = FrameInstrument.from_projection(self.projections["faceon"])
# -----------------------------------------------------------------
def create_ski_files(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the ski files for the different contributions ...")
# Remove the existing instruments
self.ski.remove_all_instruments()
# Add the instruments
for name in self.instruments: self.ski.add_instrument(self.instruments[name])
# Parameters of the wavelength grid
min_wavelength = self.config.wavelengths.min * Unit(self.config.wavelengths.unit)
max_wavelength = self.config.wavelengths.max * Unit(self.config.wavelengths.unit)
points = self.config.wavelengths.npoints
# Set the logarithmic wavelength grid
self.ski.set_log_wavelength_grid(min_wavelength, max_wavelength, points, write=True)
# Set the number of photon packages
self.ski.setpackages(self.config.packages)
# Set dust system writing options
self.ski.set_write_convergence()
self.ski.set_write_density()
#self.ski.set_write_depth_map()
#self.ski.set_write_quality()
self.ski.set_write_cell_properties()
#self.ski.set_write_cells_crossed()
#self.ski.set_write_emissivity()
#self.ski.set_write_temperature()
#self.ski.set_write_isrf()
self.ski.set_write_absorption()
self.ski.set_write_grid()
        # Loop over the different contributions, creating a separate ski file instance for each
for contribution in self.contributions:
# Debugging
log.debug("Adjusting ski file for the contribution of the " + contribution + " stellar population ...")
# Create a copy of the ski file instance
ski = copy.deepcopy(self.ski)
# Remove other stellar components, except for the contribution of the total stellar population
if contribution != "total": ski.remove_stellar_components_except(self.component_names[contribution])
# For the simulation with only the ionizing stellar component, also write out the stellar density
if contribution == "ionizing": ski.set_write_stellar_density()
# Add the ski file instance to the dictionary
self.ski_files[contribution] = ski
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Copy the input maps (if necessary)
self.copy_maps()
# Write the ski files
self.write_ski_files()
# -----------------------------------------------------------------
def copy_maps(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Copying the input maps ...")
# Determine the paths to the input maps in the fit/in directory
fit_in_path = fs.join(self.fit_path, "in")
old_path = fs.join(fit_in_path, "old_stars.fits")
young_path = fs.join(fit_in_path, "young_stars.fits")
ionizing_path = fs.join(fit_in_path, "ionizing_stars.fits")
dust_path = fs.join(fit_in_path, "dust.fits")
# Copy the files to the analysis/in directory (if necessary)
if not fs.has_file(self.analysis_in_path, fs.name(old_path)): fs.copy_file(old_path, self.analysis_in_path)
if not fs.has_file(self.analysis_in_path, fs.name(young_path)): fs.copy_file(young_path, self.analysis_in_path)
if not fs.has_file(self.analysis_in_path, fs.name(ionizing_path)): fs.copy_file(ionizing_path, self.analysis_in_path)
if not fs.has_file(self.analysis_in_path, fs.name(dust_path)): fs.copy_file(dust_path, self.analysis_in_path)
# -----------------------------------------------------------------
def write_ski_files(self):
"""
Write the ski file for each contribution to its designated path.
:return:
"""
# Inform the user
log.info("Writing the ski file ...")
# Loop over the contributions
for contribution in self.ski_files:
# Determine the path to the ski file
path = self.ski_paths[contribution]
# Debugging
log.debug("Writing the ski file for the " + contribution + " stellar population to '" + path + "' ...")
# Save the ski file
self.ski_files[contribution].saveto(path)
# -----------------------------------------------------------------
def launch(self):
"""
Launch the simulations for the different contributions.
:return:
"""
# Inform the user
log.info("Launching the simulations ...")
# Determine the path to the analysis/heating/scripts directory (batch scripts are stored there for manual inspection)
scripts_path = fs.join(self.analysis_heating_path, "scripts")
if not fs.is_directory(scripts_path): fs.create_directory(scripts_path)
for host_id in self.launcher.host_ids:
script_dir_path = fs.join(scripts_path, host_id)
if not fs.is_directory(script_dir_path): fs.create_directory(script_dir_path)
self.launcher.set_script_path(host_id, script_dir_path)
# Enable screen output (for debugging) for remote hosts without a job scheduling system
for host_id in self.launcher.no_scheduler_host_ids: self.launcher.enable_screen_output(host_id)
# Loop over the contributions
for contribution in self.ski_paths:
# Determine a name for this simulation
simulation_name = self.galaxy_name + "_heating_" + contribution
# Get the ski path for this simulation
ski_path = self.ski_paths[contribution]
# Get the local output path for the simulation
output_path = self.output_paths[contribution]
# Create the SKIRT simulation definition
definition = SingleSimulationDefinition(ski_path, self.analysis_in_path, output_path)
# Debugging
log.debug("Adding the simulation of the contribution of the " + contribution + " stellar population to the queue ...")
# Put the parameters in the queue and get the simulation object
self.launcher.add_to_queue(definition, simulation_name)
# Set scheduling options (for the different remote hosts with a scheduling system)
#for host_id in self.scheduling_options: self.launcher.set_scheduling_options(host_id, simulation_name, self.scheduling_options[host_id])
# Run the launcher; this schedules the simulations
simulations = self.launcher.run()
# Loop over the scheduled simulations (if something has to be set)
#for simulation in simulations: pass
# -----------------------------------------------------------------
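# Illustrative sketch (not part of the original class): the logarithmic wavelength grid
# configured by set_log_wavelength_grid() above is conceptually a numpy logspace between
# the configured minimum and maximum wavelength. The values below are hypothetical
# placeholders (and numpy is assumed to be available), not the actual configuration defaults.
import numpy as np
_example_min = 0.1      # micron (placeholder)
_example_max = 1000.    # micron (placeholder)
_example_npoints = 150  # placeholder
_example_grid = np.logspace(np.log10(_example_min), np.log10(_example_max), _example_npoints)
print("example wavelength grid: %d points from %.2f to %.1f micron" % (len(_example_grid), _example_grid[0], _example_grid[-1]))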
| mit |
adviti/melange | thirdparty/google_appengine/lib/webob/webob/updatedict.py | 35 | 1063 | """
Dict that has a callback on all updates
"""
class UpdateDict(dict):
updated = None
updated_args = None
def _updated(self):
"""
Assign to new_dict.updated to track updates
"""
updated = self.updated
if updated is not None:
args = self.updated_args
if args is None:
args = (self,)
updated(*args)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
self._updated()
def __delitem__(self, key):
dict.__delitem__(self, key)
self._updated()
def clear(self):
dict.clear(self)
self._updated()
def update(self, *args, **kw):
dict.update(self, *args, **kw)
self._updated()
def setdefault(self, key, failobj=None):
dict.setdefault(self, key, failobj)
self._updated()
def pop(self, key, *args):
v = dict.pop(self, key, *args)
self._updated()
return v
def popitem(self):
v = dict.popitem(self)
self._updated()
return v
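# Illustrative usage sketch (not part of the original module): the callback assigned to
# `updated` fires after every mutating operation; with `updated_args` left as None the
# dict itself is passed to the callback.
import sys
_d = UpdateDict()
_d.updated = lambda mapping: sys.stdout.write("changed: %r\n" % mapping)
_d["a"] = 1      # triggers the callback
_d.update(b=2)   # triggers the callback again
del _d["a"]      # and again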
| apache-2.0 |
vornne/pw_module_system | header_item_modifiers.py | 6 | 1843 | imod_plain = 0
imod_cracked = 1
imod_rusty = 2
imod_bent = 3
imod_chipped = 4
imod_battered = 5
imod_poor = 6
imod_crude = 7
imod_old = 8
imod_cheap = 9
imod_fine = 10
imod_well_made = 11
imod_sharp = 12
imod_balanced = 13
imod_tempered = 14
imod_deadly = 15
imod_exquisite = 16
imod_masterwork = 17
imod_heavy = 18
imod_strong = 19
imod_powerful = 20
imod_tattered = 21
imod_ragged = 22
imod_rough = 23
imod_sturdy = 24
imod_thick = 25
imod_hardened = 26
imod_reinforced = 27
imod_superb = 28
imod_lordly = 29
imod_lame = 30
imod_swaybacked = 31
imod_stubborn = 32
imod_timid = 33
imod_meek = 34
imod_spirited = 35
imod_champion = 36
imod_fresh = 37
imod_day_old = 38
imod_two_day_old = 39
imod_smelling = 40
imod_rotten = 41
imod_large_bag = 42
imodbit_plain = 1
imodbit_cracked = 2
imodbit_rusty = 4
imodbit_bent = 8
imodbit_chipped = 16
imodbit_battered = 32
imodbit_poor = 64
imodbit_crude = 128
imodbit_old = 256
imodbit_cheap = 512
imodbit_fine = 1024
imodbit_well_made = 2048
imodbit_sharp = 4096
imodbit_balanced = 8192
imodbit_tempered = 16384
imodbit_deadly = 32768
imodbit_exquisite = 65536
imodbit_masterwork = 131072
imodbit_heavy = 262144
imodbit_strong = 524288
imodbit_powerful = 1048576
imodbit_tattered = 2097152
imodbit_ragged = 4194304
imodbit_rough = 8388608
imodbit_sturdy = 16777216
imodbit_thick = 33554432
imodbit_hardened = 67108864
imodbit_reinforced = 134217728
imodbit_superb = 268435456
imodbit_lordly = 536870912
imodbit_lame = 1073741824
imodbit_swaybacked = 2147483648
imodbit_stubborn = 4294967296
imodbit_timid = 8589934592
imodbit_meek = 17179869184
imodbit_spirited = 34359738368
imodbit_champion = 68719476736
imodbit_fresh = 137438953472
imodbit_day_old = 274877906944
imodbit_two_day_old = 549755813888
imodbit_smelling = 1099511627776
imodbit_rotten = 2199023255552
imodbit_large_bag = 4398046511104
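# Consistency sketch (not part of the original module): each imodbit_* constant is the
# single-bit mask 1 << imod_* of the matching modifier index, so the two tables can be
# cross-checked programmatically.
assert imodbit_plain == 1 << imod_plain
assert imodbit_cracked == 1 << imod_cracked
assert imodbit_spirited == 1 << imod_spirited
assert imodbit_large_bag == 1 << imod_large_bag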
| bsd-3-clause |
brianwoo/django-tutorial | build/Django/build/lib.linux-x86_64-2.7/django/db/migrations/writer.py | 42 | 20479 | from __future__ import unicode_literals
import collections
import datetime
import decimal
import inspect
import math
import os
import re
import sys
import types
from importlib import import_module
from django.apps import apps
from django.db import migrations, models
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.operations.base import Operation
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
COMPILED_REGEX_TYPE = type(re.compile(''))
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
class OperationWriter(object):
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if (_arg_name in self.operation.serialization_expand_args and
isinstance(_arg_value, (list, tuple, dict))):
if isinstance(_arg_value, dict):
self.feed('%s={' % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s: %s' % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s=%s' % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s=%s,' % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
argspec = inspect.getargspec(self.operation.__init__)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
# Start at one because argspec includes "self"
for i, arg in enumerate(args, 1):
arg_value = arg
arg_name = argspec.args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in argspec.args[i + 1:]:
if arg_name in kwargs:
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple(force_text(s) for s in dependency)
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match("^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
imports.discard("from django.db import models")
items["imports"] = "\n".join(imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(migration_imports)
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
return (MIGRATION_TEMPLATE % items).encode("utf8")
@staticmethod
def serialize_datetime(value):
"""
Returns a serialized version of a datetime object that is valid,
executable python code. It converts timezone-aware values to utc with
an 'executable' utc representation of tzinfo.
"""
if value.tzinfo is not None and value.tzinfo != utc:
value = value.astimezone(utc)
value_repr = repr(value).replace("<UTC>", "utc")
if isinstance(value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
return value_repr
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
# Python 3 fails when the migrations directory does not have a
# __init__.py file
if not hasattr(migrations_module, '__file__'):
raise ImportError
basedir = os.path.dirname(upath(migrations_module.__file__))
except ImportError:
app_config = apps.get_app_config(self.migration.app_label)
migrations_package_basename = migrations_package_name.split(".")[-1]
# Alright, see if it's a direct submodule of the app
if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name:
basedir = os.path.join(app_config.path, migrations_package_basename)
else:
# In case of using MIGRATION_MODULES setting and the custom
# package doesn't exist, create one.
package_dirs = migrations_package_name.split(".")
create_path = os.path.join(upath(sys.path[0]), *package_dirs)
if not os.path.isdir(create_path):
os.makedirs(create_path)
for i in range(1, len(package_dirs) + 1):
init_dir = os.path.join(upath(sys.path[0]), *package_dirs[:i])
init_path = os.path.join(init_dir, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
return os.path.join(create_path, self.filename)
return os.path.join(basedir, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
name, imports = cls._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@classmethod
def _serialize_path(cls, path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# FIXME: Ideally Promise would be reconstructible, but for now we
# use force_text on them and defer to the normal string serialization
# process.
if isinstance(value, Promise):
value = force_text(value)
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
# Don't use the literal "{%s}" as it doesn't support empty set
format = "set([%s])"
elif isinstance(value, tuple):
# When len(value)==0, the empty tuple should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(value) != 1 else "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
# Datetimes
elif isinstance(value, datetime.datetime):
value_repr = cls.serialize_datetime(value)
imports = ["import datetime"]
if value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return value_repr, set(imports)
# Dates
elif isinstance(value, datetime.date):
value_repr = repr(value)
if isinstance(value, datetime_safe.date):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
# Times
elif isinstance(value, datetime.time):
value_repr = repr(value)
if isinstance(value, datetime_safe.time):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
# Timedeltas
elif isinstance(value, datetime.timedelta):
return repr(value), {"import datetime"}
# Settings references
elif isinstance(value, SettingsReference):
return "settings.%s" % value.setting_name, {"from django.conf import settings"}
# Simple types
elif isinstance(value, float):
if math.isnan(value) or math.isinf(value):
return 'float("{}")'.format(value), set()
return repr(value), set()
elif isinstance(value, six.integer_types + (bool, type(None))):
return repr(value), set()
elif isinstance(value, six.binary_type):
value_repr = repr(value)
if six.PY2:
# Prepend the `b` prefix since we're importing unicode_literals
value_repr = 'b' + value_repr
return value_repr, set()
elif isinstance(value, six.text_type):
value_repr = repr(value)
if six.PY2:
# Strip the `u` prefix since we're importing unicode_literals
value_repr = value_repr[1:]
return value_repr, set()
# Decimal
elif isinstance(value, decimal.Decimal):
return repr(value), {"from decimal import Decimal"}
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
return cls.serialize_deconstructed(path, args, kwargs)
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
if module == six.moves.builtins.__name__:
return value.__name__, set()
else:
return "%s.%s" % (module, value.__name__), {"import %s" % module}
elif isinstance(value, models.manager.BaseManager):
as_manager, manager_path, qs_path, args, kwargs = value.deconstruct()
if as_manager:
name, imports = cls._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return cls.serialize_deconstructed(manager_path, args, kwargs)
elif isinstance(value, Operation):
string, imports = OperationWriter(value, indentation=0).serialize()
# Nested operation, trailing comma is handled in upper OperationWriter._write()
return string.rstrip(','), imports
# Anything that knows how to deconstruct itself.
elif hasattr(value, 'deconstruct'):
return cls.serialize_deconstructed(*value.deconstruct())
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# @classmethod?
if getattr(value, "__self__", None) and isinstance(value.__self__, type):
klass = value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module}
# Further error checking
if value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % value)
# Python 3 is a lot easier, and only uses this branch if it's not local.
if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
if "<" not in value.__qualname__: # Qualname can include <locals>
return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__}
# Python 2/fallback version
module_name = value.__module__
# Make sure it's actually there and not an unbound method
module = import_module(module_name)
if not hasattr(module, value.__name__):
raise ValueError(
"Could not find function %s in %s.\n"
"Please note that due to Python 2 limitations, you cannot "
"serialize unbound method functions (e.g. a method "
"declared and used in the same class body). Please move "
"the function into the main module body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (value.__name__, module_name, get_docs_version()))
return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name}
# Other iterables
elif isinstance(value, collections.Iterable):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
# When len(strings)==0, the empty iterable should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
format = "(%s)" if len(strings) != 1 else "(%s,)"
return format % (", ".join(strings)), imports
# Compiled regex
elif isinstance(value, COMPILED_REGEX_TYPE):
imports = {"import re"}
regex_pattern, pattern_imports = cls.serialize(value.pattern)
regex_flags, flag_imports = cls.serialize(value.flags)
imports.update(pattern_imports)
imports.update(flag_imports)
args = [regex_pattern]
if value.flags:
args.append(regex_flags)
return "re.compile(%s)" % ', '.join(args), imports
# Uh oh.
else:
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| gpl-3.0 |
huchoi/edx-platform | common/djangoapps/util/tests/test_memcache.py | 101 | 3655 | """
Tests for memcache in util app
"""
from django.test import TestCase
from django.core.cache import get_cache
from util.memcache import safe_key
class MemcacheTest(TestCase):
"""
Test memcache key cleanup
"""
# Test whitespace, control characters, and some non-ASCII UTF-16
UNICODE_CHAR_CODES = ([c for c in range(0, 30)] + [127] +
[129, 500, 2 ** 8 - 1, 2 ** 8 + 1, 2 ** 16 - 1])
def setUp(self):
self.cache = get_cache('default')
def test_safe_key(self):
key = safe_key('test', 'prefix', 'version')
self.assertEqual(key, 'prefix:version:test')
def test_numeric_inputs(self):
# Numeric key
self.assertEqual(safe_key(1, 'prefix', 'version'), 'prefix:version:1')
# Numeric prefix
self.assertEqual(safe_key('test', 5, 'version'), '5:version:test')
# Numeric version
self.assertEqual(safe_key('test', 'prefix', 5), 'prefix:5:test')
def test_safe_key_long(self):
# Choose lengths close to memcached's cutoff (250)
for length in [248, 249, 250, 251, 252]:
# Generate a key of that length
key = 'a' * length
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for key length {0}".format(length))
def test_long_key_prefix_version(self):
# Long key
key = safe_key('a' * 300, 'prefix', 'version')
self.assertTrue(self._is_valid_key(key))
# Long prefix
key = safe_key('key', 'a' * 300, 'version')
self.assertTrue(self._is_valid_key(key))
# Long version
key = safe_key('key', 'prefix', 'a' * 300)
self.assertTrue(self._is_valid_key(key))
def test_safe_key_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a key with that character
key = unichr(unicode_char)
# Make the key safe
key = safe_key(key, '', '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_prefix_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a prefix with that character
prefix = unichr(unicode_char)
# Make the key safe
key = safe_key('test', prefix, '')
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def test_safe_key_version_unicode(self):
for unicode_char in self.UNICODE_CHAR_CODES:
# Generate a version with that character
version = unichr(unicode_char)
# Make the key safe
key = safe_key('test', '', version)
# The key should now be valid
self.assertTrue(self._is_valid_key(key),
msg="Failed for unicode character {0}".format(unicode_char))
def _is_valid_key(self, key):
"""
Test that a key is memcache-compatible.
Based on Django's validator in core.cache.backends.base
"""
# Check the length
if len(key) > 250:
return False
# Check that there are no spaces or control characters
for char in key:
if ord(char) < 33 or ord(char) == 127:
return False
return True
| agpl-3.0 |
nilmini20s/gem5-2016-08-13 | tests/configs/simple-atomic-mp.py | 69 | 2376 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
nb_cores = 4
root = BaseSESystem(mem_mode='atomic', cpu_class=AtomicSimpleCPU,
num_cpus=nb_cores).create_root()
| bsd-3-clause |
n0m4dz/odoo | openerp/cli/__init__.py | 185 | 2008 | import logging
import sys
import os
import openerp
from openerp import tools
from openerp.modules import module
_logger = logging.getLogger(__name__)
commands = {}
class CommandType(type):
def __init__(cls, name, bases, attrs):
super(CommandType, cls).__init__(name, bases, attrs)
name = getattr(cls, name, cls.__name__.lower())
cls.name = name
if name != 'command':
commands[name] = cls
class Command(object):
"""Subclass this class to define new openerp subcommands """
__metaclass__ = CommandType
def run(self, args):
pass
class Help(Command):
"""Display the list of available commands"""
def run(self, args):
print "Available commands:\n"
padding = max([len(k) for k in commands.keys()]) + 2
for k, v in commands.items():
print " %s%s" % (k.ljust(padding, ' '), v.__doc__ or '')
print "\nUse '%s <command> --help' for individual command help." % sys.argv[0].split(os.path.sep)[-1]
import server
import deploy
import scaffold
import start
def main():
args = sys.argv[1:]
# The only shared option is '--addons-path=' needed to discover additional
# commands from modules
if len(args) > 1 and args[0].startswith('--addons-path=') and not args[1].startswith("-"):
# parse only the addons-path, do not setup the logger...
tools.config._parse_config([args[0]])
args = args[1:]
# Default legacy command
command = "server"
# Subcommand discovery
if len(args) and not args[0].startswith("-"):
logging.disable(logging.CRITICAL)
for m in module.get_modules():
m_path = module.get_module_path(m)
if os.path.isdir(os.path.join(m_path, 'cli')):
__import__('openerp.addons.' + m)
logging.disable(logging.NOTSET)
command = args[0]
args = args[1:]
if command in commands:
o = commands[command]()
o.run(args)
# vim:et:ts=4:sw=4:
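# Illustrative sketch (not part of the original module): because of the CommandType
# metaclass above, simply subclassing Command registers the new subcommand in `commands`
# under its lowercased class name, so the CLI dispatch in main() would find it.
class Greet(Command):
    """Print a greeting (illustrative example only)"""
    def run(self, args):
        print "hello from the greet subcommand:", args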
| agpl-3.0 |
curtisstpierre/django | django/contrib/gis/db/backends/postgis/operations.py | 168 | 15689 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.db.backends.postgis.pgraster import (
from_pgraster, to_pgraster,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
from .pgraster import get_pgraster_srid
class PostGISOperator(SpatialOperator):
def __init__(self, geography=False, **kwargs):
# Only a subset of the operators and functions are available
# for the geography type.
self.geography = geography
super(PostGISOperator, self).__init__(**kwargs)
def as_sql(self, connection, lookup, *args):
if lookup.lhs.output_field.geography and not self.geography:
raise ValueError('PostGIS geography does not support the "%s" '
'function/operator.' % (self.func or self.op,))
return super(PostGISOperator, self).as_sql(connection, lookup, *args)
class PostGISDistanceOperator(PostGISOperator):
sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'
def as_sql(self, connection, lookup, template_params, sql_params):
if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection):
sql_template = self.sql_template
if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid':
template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'})
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s'
else:
template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'})
return sql_template % template_params, sql_params
return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
name = 'postgis'
postgis = True
geography = True
geom_func_prefix = 'ST_'
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': PostGISOperator(op='~'),
'bboverlaps': PostGISOperator(op='&&', geography=True),
'contained': PostGISOperator(op='@'),
'contains': PostGISOperator(func='ST_Contains'),
'overlaps_left': PostGISOperator(op='&<'),
'overlaps_right': PostGISOperator(op='&>'),
'overlaps_below': PostGISOperator(op='&<|'),
'overlaps_above': PostGISOperator(op='|&>'),
'left': PostGISOperator(op='<<'),
'right': PostGISOperator(op='>>'),
'strictly_below': PostGISOperator(op='<<|'),
'strictly_above': PostGISOperator(op='|>>'),
'same_as': PostGISOperator(op='~='),
'exact': PostGISOperator(op='~='), # alias of same_as
'contains_properly': PostGISOperator(func='ST_ContainsProperly'),
'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True),
'covers': PostGISOperator(func='ST_Covers', geography=True),
'crosses': PostGISOperator(func='ST_Crosses'),
'disjoint': PostGISOperator(func='ST_Disjoint'),
'equals': PostGISOperator(func='ST_Equals'),
'intersects': PostGISOperator(func='ST_Intersects', geography=True),
'overlaps': PostGISOperator(func='ST_Overlaps'),
'relate': PostGISOperator(func='ST_Relate'),
'touches': PostGISOperator(func='ST_Touches'),
'within': PostGISOperator(func='ST_Within'),
'dwithin': PostGISOperator(func='ST_DWithin', geography=True),
'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True),
'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True),
'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True),
'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True),
}
unsupported_functions = set()
function_names = {
'BoundingCircle': 'ST_MinimumBoundingCircle',
'MemSize': 'ST_Mem_Size',
'NumPoints': 'ST_NPoints',
}
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
prefix = self.geom_func_prefix
self.area = prefix + 'Area'
self.bounding_circle = prefix + 'MinimumBoundingCircle'
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.extent3d = prefix + '3DExtent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = prefix + 'GeoHash'
self.geojson = prefix + 'AsGeoJson'
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length3d = prefix + '3DLength'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.perimeter3d = prefix + '3DPerimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
# Run a basic query to check the status of the connection so we're
# sure we only raise the error below if the problem comes from
# PostGIS and not from PostgreSQL itself (see #24862).
self._get_postgis_func('version')
try:
vtup = self.postgis_version_tuple()
except ProgrammingError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s" '
'using command "SELECT postgis_lib_version()". '
'GeoDjango requires at least PostGIS version 2.0. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
def convert_extent(self, box, srid):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
if box is None:
return None
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d, srid):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
if box3d is None:
return None
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex, srid=geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given spatial field.
"""
if f.geom_type == 'RASTER':
return 'raster'
elif f.geography:
if f.srid != 4326:
raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
else:
# Type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
the geography column type.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
# Get the srid for this object
if value is None:
value_srid = None
elif f.geom_type == 'RASTER':
value_srid = get_pgraster_srid(value)
else:
value_srid = value.srid
# Adding Transform() to the SQL placeholder if the value srid
# is not equal to the field srid.
if value_srid is None or value_srid == f.srid:
placeholder = '%s'
elif f.geom_type == 'RASTER':
placeholder = '%s((%%s)::raster, %s)' % (self.transform, f.srid)
else:
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'as_sql'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
sql, _ = compiler.compile(value)
placeholder = placeholder % sql
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def spatial_aggregate_name(self, agg_name):
if agg_name == 'Extent3D':
return self.extent3d
else:
return self.geom_func_prefix + agg_name
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
return PostGISGeometryColumns
def spatial_ref_sys(self):
return PostGISSpatialRefSys
# Methods to convert between PostGIS rasters and dicts that are
# readable by GDALRaster.
def parse_raster(self, value):
return from_pgraster(value)
def deconstruct_raster(self, value):
return to_pgraster(value)
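# Illustrative sketch (not part of Django): the BOX parsing performed by convert_extent()
# above, extracted as a standalone helper so the string format is easy to see.
def _parse_box2d(box):
    ll, ur = box[4:-1].split(',')
    xmin, ymin = map(float, ll.split())
    xmax, ymax = map(float, ur.split())
    return (xmin, ymin, xmax, ymax)
print(_parse_box2d('BOX(-90.0 30.0,-85.0 40.0)'))  # (-90.0, 30.0, -85.0, 40.0)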
| bsd-3-clause |
cseagle/fREedom | pe_loader.py | 1 | 13257 | #!/usr/bin/env python
'''
Crude PE32 / PE32+ loader, conforming to the Loader interface, for a stand-alone binnavi compatible disassembler
'''
__author__ = "Chris Eagle"
__copyright__ = "Copyright 2015, Chris Eagle"
__credits__ = ["Chris Eagle"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Chris Eagle"
__email__ = "[email protected]"
__status__ = "Use at your own risk"
import sys
import struct
import hashlib
import binascii
import capstone
from loader import *
IMAGE_FILE_MACHINE_I386 = 0x14c
IMAGE_FILE_MACHINE_ARM = 0x1c0
IMAGE_FILE_MACHINE_THUMB = 0x1c2
IMAGE_FILE_MACHINE_ARMV7 = 0x1c4
IMAGE_FILE_MACHINE_AMD64 = 0x8664
OK_PE_MACHINES = [IMAGE_FILE_MACHINE_I386, IMAGE_FILE_MACHINE_ARM,
IMAGE_FILE_MACHINE_THUMB, IMAGE_FILE_MACHINE_ARMV7,
IMAGE_FILE_MACHINE_AMD64]
IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b
IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_SCN_MEM_EXECUTE = 0x20000000
IMAGE_SCN_MEM_READ = 0x40000000
IMAGE_SCN_MEM_WRITE = 0x80000000
DATA_DIRECTORY_EXPORT = 0
DATA_DIRECTORY_IMPORT = 1
class InvalidHeader(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class FileHeader(object):
def __init__(self, raw, offset):
self.raw = raw[offset:offset+20]
fields = struct.unpack("<HHIIIHH", self.raw)
self.Machine = fields[0]
self.NumberOfSections = fields[1]
self.TimeDateStamp = fields[2]
self.PointerToSymbolTable = fields[3]
self.NumberOfSymbols = fields[4]
self.SizeOfOptionalHeader = fields[5]
self.Characteristics = fields[6]
def __del__(self):
del self.raw
def sizeof(self):
return len(self.raw)
class ImportDirectory(object):
# enough loading has taken place by the time that we get here
# that we need to start dealing with RVA
def __init__(self, pe, va):
self.raw = pe.get_bytes(va, 20)
fields = struct.unpack("<IIIII", self.raw)
self.ilt = fields[0]
self.time_date = fields[1]
self.forwarder = fields[2]
self.name_rva = fields[3]
self.iat = fields[4]
self.pe = pe
def __del__(self):
del self.raw
def parse(self):
self.dll = self.pe.get_string(self.name_rva + self.pe.image_base)
if self.ilt != 0:
iat = self.ilt
else:
iat = self.iat
mask = 0x80 << (self.pe.sizeof_ptr * 8 - 8)
while True:
addr = iat + self.pe.image_base
iat += self.pe.sizeof_ptr
ie = self.pe.get_pointer(addr)
if ie == 0:
break
if ie & mask:
# it's an ordinal
func = "%s_%d" % (self.dll.replace('.', '_'), ie & 0xffff)
else:
# it's a name rva
func = self.pe.get_string(ie + 2 + self.pe.image_base)
self.pe.add_import(addr, func)
def is_null_dir(self):
return self.raw == ('\x00'*20)
class ExportDirectory(object):
# enough loading has taken place by the time that we get here
# that we need to start dealing with RVA
def __init__(self, pe, va, size):
self.raw = pe.get_bytes(va, 40)
self.rva = va - pe.image_base
self.end_rva = self.rva + size
fields = struct.unpack("<7I", self.raw[12:])
self.NameRva = fields[0]
self.OrdinalBase = fields[1]
self.NumberOfFunctions = fields[2]
self.NumberOfNames = fields[3]
self.AddressOfFunctions = fields[4]
self.AddressOfNames = fields[5]
self.AddressOfNameOrdinals = fields[6]
self.pe = pe
def __del__(self):
del self.raw
def parse(self):
self.dll = self.pe.get_string(self.NameRva + self.pe.image_base)
aof = self.AddressOfFunctions + self.pe.image_base
aon = self.AddressOfNames + self.pe.image_base
aono = self.AddressOfNameOrdinals + self.pe.image_base
fcount = 0
for f in range(self.NumberOfNames):
name_rva = self.pe.get_dword(aon)
aon += 4
name = self.pe.get_string(name_rva + self.pe.image_base)
func_idx = self.pe.get_word(aono + f * 2)
func_rva = self.pe.get_dword(aof + func_idx * 4)
if func_rva >= self.rva and func_rva < self.end_rva:
#this is a forwarded entry
fcount += 1
continue
else:
self.pe.add_export(func_rva + self.pe.image_base, name)
for f in range(self.NumberOfNames, self.NumberOfFunctions):
name = "%s_%d" % (self.dll.replace('.', '_'), f)
func_idx = self.pe.get_word(aono + f * 2)
func_rva = self.pe.get_dword(aof + func_idx * 4)
self.pe.add_export(func_rva + self.pe.image_base, name)
class OptionalHeaderBase(object):
def __init__(self, raw, offset):
try:
self.common = raw[offset:offset+24]
fields = struct.unpack("<HBBIIIII", self.common)
self.Magic = fields[0]
self.SizeOfCode = fields[3]
self.SizeOfInitializedData = fields[4]
self.SizeOfUninitializedData = fields[5]
self.AddressOfEntryPoint = fields[6]
self.BaseOfCode = fields[7]
if self.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC:
bod = raw[offset+24:offset+28]
self.common += bod
self.BaseOfData = struct.unpack("<I", bod)[0]
self.DataDirectories = None
self.ImageBase = 0
except Exception as e:
print e.message
raise InvalidHeader("Invalid PE header")
def __del__(self):
del self.common
# return va (not rva),size
def get_directory(self, n):
rva,size = struct.unpack("<II", self.DataDirectories[n * 8:8 + n * 8])
if size == 0:
return None, None
return self.ImageBase + rva, size
class OptionalHeader32(OptionalHeaderBase):
def __init__(self, raw, offset):
OptionalHeaderBase.__init__(self, raw, offset)
try:
self.fields = raw[offset+28:offset+96]
tmp = struct.unpack("<III", self.fields[0:12])
self.ImageBase = tmp[0]
self.SectionAlignment = tmp[1]
self.FileAlignment = tmp[2]
self.NumberOfRvaAndSizes = struct.unpack("<I", self.fields[-4:])[0]
self.DataDirectories = raw[offset+96:offset+96+self.NumberOfRvaAndSizes*8]
except Exception as e:
print e.message
raise InvalidHeader("Invalid PE32 header")
def __del__(self):
del self.fields
del self.DataDirectories
OptionalHeaderBase.__del__(self)
class OptionalHeader64(OptionalHeaderBase):
def __init__(self, raw, offset):
OptionalHeaderBase.__init__(self, raw, offset)
try:
self.fields = raw[offset+24:offset+112]
tmp = struct.unpack("<QII", self.fields[0:16])
self.ImageBase = tmp[0]
self.SectionAlignment = tmp[1]
self.FileAlignment = tmp[2]
self.NumberOfRvaAndSizes = struct.unpack("<I", self.fields[-4:])[0]
self.DataDirectories = raw[offset+112:offset+112+self.NumberOfRvaAndSizes*8]
except Exception as e:
raise InvalidHeader("Invalid PE64 header")
def __del__(self):
del self.fields
del self.DataDirectories
OptionalHeaderBase.__del__(self)
class SectionHeader(object):
def __init__(self, raw, offset):
# try:
self.raw = raw[offset:offset+40]
fields = struct.unpack("<8sIIIIIIHHI", self.raw)
self.Name = fields[0].rstrip('\x00')
self.VirtualSize = fields[1]
self.VirtualAddress = fields[2]
self.SizeOfRawData = fields[3]
self.PointerToRawData = fields[4]
self.PointerToRelocations = fields[5]
self.PointerToLinenumbers = fields[6]
self.NumberOfRelocations = fields[7]
self.NumberOfLinenumbers = fields[8]
self.Characteristics = fields[9]
self.perms = 0
if self.Characteristics & IMAGE_SCN_MEM_READ:
self.perms |= PROT_READ
if self.Characteristics & IMAGE_SCN_MEM_WRITE:
self.perms |= PROT_WRITE
if self.Characteristics & IMAGE_SCN_MEM_EXECUTE:
self.perms |= PROT_EXEC
# except:
# raise InvalidHeader("Invalid section header")
def __del__(self):
del self.raw
class PeBase(Loader):
def __init__(self, pe_file):
Loader.__init__(self, pe_file)
self.pe_offset = 0
self.section_headers = []
self.non_returning_funcs.append("ExitProcess")
self.non_returning_funcs.append("ExitThread")
self.non_returning_funcs.append("_ExitProcess")
self.non_returning_funcs.append("_ExitThread")
def __del__(self):
del self.section_headers[:]
del self.section_headers
Loader.__del__(self)
# Perform common PE validation tasks
def is_valid(self):
if self.raw[0:2] != 'MZ':
return False
# image sections are still in .raw mode at this point
self.pe_offset = self.get_dword(0x3c)
if self.get_dword(self.pe_offset) != IMAGE_NT_SIGNATURE:
return False
self.FileHeader = FileHeader(self.raw, self.pe_offset + 4)
if self.FileHeader.Machine == IMAGE_FILE_MACHINE_I386:
self.arch = capstone.CS_ARCH_X86
self.mode = capstone.CS_MODE_32
self.arch_name = 'x86-32'
elif self.FileHeader.Machine == IMAGE_FILE_MACHINE_ARM or self.FileHeader.Machine == IMAGE_FILE_MACHINE_THUMB:
self.arch = capstone.CS_ARCH_ARM
self.mode = capstone.CS_MODE_ARM
self.arch_name = 'ARM-32'
elif self.FileHeader.Machine == IMAGE_FILE_MACHINE_ARMV7:
self.arch = capstone.CS_ARCH_ARM
self.mode = capstone.CS_MODE_THUMB
self.arch_name = 'ARM-THUMB'
elif self.FileHeader.Machine == IMAGE_FILE_MACHINE_AMD64:
self.arch = capstone.CS_ARCH_X86
self.mode = capstone.CS_MODE_64
self.arch_name = 'x86-64'
else:
# anything else, we don't recognize
# could move this check into the caller
# to allow it to determine whether it has an appropriate
# disassembler
return False
oh_magic = self.get_word(self.pe_offset + 24)
if oh_magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC and oh_magic != IMAGE_NT_OPTIONAL_HDR64_MAGIC:
return False
#many other checks we could perform
return True
def load_sections(self):
del self.mapped[:]
del self.sections[:]
self.sections_by_name.clear()
for i in range(self.FileHeader.NumberOfSections):
self.section_headers.append(SectionHeader(self.raw, self.pe_offset + 24 + self.FileHeader.SizeOfOptionalHeader + i * 40))
for s in self.section_headers:
va = self.OptionalHeader.ImageBase + s.VirtualAddress
mr = self.raw[s.PointerToRawData:s.PointerToRawData+s.SizeOfRawData].ljust(s.VirtualSize, '\x00')
self.add_mapped(va, va + max(s.VirtualSize, s.SizeOfRawData), s.perms, mr)
self.add_section(s.Name, va, va + s.VirtualSize, s.perms, s.SizeOfRawData)
def parse_imports(self):
va,size = self.OptionalHeader.get_directory(DATA_DIRECTORY_IMPORT)
if size is not None:
while True:
id = ImportDirectory(self, va)
if id.is_null_dir():
break
id.parse()
va += 20
def parse_symbols(self):
pass
def parse_exports(self):
self.add_export(self.start, "_start")
va,size = self.OptionalHeader.get_directory(DATA_DIRECTORY_EXPORT)
if size is not None:
exp = ExportDirectory(self, va, size)
exp.parse()
def load(self):
if self.is_valid():
self.image_base = self.OptionalHeader.ImageBase
self.start = self.OptionalHeader.AddressOfEntryPoint + self.image_base
self.load_sections()
self.parse_imports()
self.parse_exports()
return True
return False
class Pe32(PeBase):
def __init__(self, pe_file):
PeBase.__init__(self, pe_file)
# override to perform file type validation checks such
# as checking magic numbers, etc
def is_valid(self):
try:
if not PeBase.is_valid(self):
return False
#now do PE32 specific checks
self.OptionalHeader = OptionalHeader32(self.raw, self.pe_offset + 24)
if self.OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC:
return False
self.set_pointer_size(4)
except Exception as e:
#any exception means it's not a PE32
raise e
# return False
return True
class Pe64(PeBase):
def __init__(self, pe_file):
PeBase.__init__(self, pe_file)
# override to perform file type validation checks such
# as checking magic numbers, etc
def is_valid(self):
try:
if not PeBase.is_valid(self):
return False
#now do PE64 specific checks
self.OptionalHeader = OptionalHeader64(self.raw, self.pe_offset + 24)
if self.OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR64_MAGIC:
return False
self.set_pointer_size(8)
except Exception as e:
#any exception means it's not a PE64
raise e
# return False
return True
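# Illustrative usage sketch (not part of the original module). Whether the Loader base
# class expects a file path or raw bytes is not shown here, so the constructor argument
# below is an assumption; start, image_base and arch_name are set by load() above.
# loader = Pe32("target.exe")   # or Pe64("target.exe") for PE32+
# if loader.load():
#     print "entry point : 0x%x" % loader.start
#     print "image base  : 0x%x" % loader.image_base
#     print "architecture:", loader.arch_name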
| gpl-2.0 |
nkgilley/home-assistant | homeassistant/components/universal/media_player.py | 3 | 16205 | """Combination of multiple media players for a universal controller."""
from copy import copy
import logging
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_CLEAR_PLAYLIST,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_STATE,
CONF_STATE_TEMPLATE,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_SHUFFLE_SET,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_call_from_config
_LOGGER = logging.getLogger(__name__)
ATTR_ACTIVE_CHILD = "active_child"
ATTR_DATA = "data"
CONF_ATTRS = "attributes"
CONF_CHILDREN = "children"
CONF_COMMANDS = "commands"
CONF_SERVICE = "service"
CONF_SERVICE_DATA = "service_data"
OFF_STATES = [STATE_IDLE, STATE_OFF, STATE_UNAVAILABLE]
ATTRS_SCHEMA = cv.schema_with_slug_keys(cv.string)
CMD_SCHEMA = cv.schema_with_slug_keys(cv.SERVICE_SCHEMA)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_CHILDREN, default=[]): cv.entity_ids,
vol.Optional(CONF_COMMANDS, default={}): CMD_SCHEMA,
vol.Optional(CONF_ATTRS, default={}): vol.Or(
cv.ensure_list(ATTRS_SCHEMA), ATTRS_SCHEMA
),
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
},
extra=vol.REMOVE_EXTRA,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the universal media players."""
player = UniversalMediaPlayer(
hass,
config.get(CONF_NAME),
config.get(CONF_CHILDREN),
config.get(CONF_COMMANDS),
config.get(CONF_ATTRS),
config.get(CONF_STATE_TEMPLATE),
)
async_add_entities([player])
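# Illustrative sketch (not part of the original module): a configuration mapping of the
# shape that PLATFORM_SCHEMA above accepts; all entity ids and the switch service are
# hypothetical examples, and real configurations are normally written in YAML.
EXAMPLE_CONFIG = {
    CONF_NAME: "living_room",
    CONF_CHILDREN: ["media_player.tv", "media_player.receiver"],
    CONF_COMMANDS: {
        "turn_on": {"service": "switch.turn_on", "data": {"entity_id": "switch.tv_power"}},
        "turn_off": {"service": "switch.turn_off", "data": {"entity_id": "switch.tv_power"}},
    },
    CONF_ATTRS: {
        "state": "switch.tv_power",
        "is_volume_muted": "media_player.receiver|is_volume_muted",
    },
}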
class UniversalMediaPlayer(MediaPlayerEntity):
"""Representation of an universal media player."""
def __init__(self, hass, name, children, commands, attributes, state_template=None):
"""Initialize the Universal media device."""
self.hass = hass
self._name = name
self._children = children
self._cmds = commands
self._attrs = {}
for key, val in attributes.items():
attr = val.split("|", 1)
if len(attr) == 1:
attr.append(None)
self._attrs[key] = attr
self._child_state = None
self._state_template = state_template
if state_template is not None:
self._state_template.hass = hass
async def async_added_to_hass(self):
"""Subscribe to children and template state changes."""
@callback
def async_on_dependency_update(*_):
"""Update ha state when dependencies update."""
self.async_schedule_update_ha_state(True)
depend = copy(self._children)
for entity in self._attrs.values():
depend.append(entity[0])
if self._state_template is not None:
for entity in self._state_template.extract_entities():
depend.append(entity)
self.hass.helpers.event.async_track_state_change(
list(set(depend)), async_on_dependency_update
)
def _entity_lkp(self, entity_id, state_attr=None):
"""Look up an entity state."""
state_obj = self.hass.states.get(entity_id)
if state_obj is None:
return
if state_attr:
return state_obj.attributes.get(state_attr)
return state_obj.state
def _override_or_child_attr(self, attr_name):
"""Return either the override or the active child for attr_name."""
if attr_name in self._attrs:
return self._entity_lkp(
self._attrs[attr_name][0], self._attrs[attr_name][1]
)
return self._child_attr(attr_name)
def _child_attr(self, attr_name):
"""Return the active child's attributes."""
active_child = self._child_state
return active_child.attributes.get(attr_name) if active_child else None
async def _async_call_service(
self, service_name, service_data=None, allow_override=False
):
"""Call either a specified or active child's service."""
if service_data is None:
service_data = {}
if allow_override and service_name in self._cmds:
await async_call_from_config(
self.hass,
self._cmds[service_name],
variables=service_data,
blocking=True,
validate_config=False,
)
return
active_child = self._child_state
if active_child is None:
# No child to call service on
return
service_data[ATTR_ENTITY_ID] = active_child.entity_id
await self.hass.services.async_call(
DOMAIN, service_name, service_data, blocking=True, context=self._context
)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def master_state(self):
"""Return the master state for entity or None."""
if self._state_template is not None:
return self._state_template.async_render()
if CONF_STATE in self._attrs:
master_state = self._entity_lkp(
self._attrs[CONF_STATE][0], self._attrs[CONF_STATE][1]
)
return master_state if master_state else STATE_OFF
return None
@property
def name(self):
"""Return the name of universal player."""
return self._name
@property
def state(self):
"""Return the current state of media player.
Off if master state is off
else Status of first active child
else master state or off
"""
master_state = self.master_state # avoid multiple lookups
if (master_state == STATE_OFF) or (self._state_template is not None):
return master_state
active_child = self._child_state
if active_child:
return active_child.state
return master_state if master_state else STATE_OFF
@property
def volume_level(self):
"""Volume level of entity specified in attributes or active child."""
try:
return float(self._override_or_child_attr(ATTR_MEDIA_VOLUME_LEVEL))
except (TypeError, ValueError):
return None
@property
def is_volume_muted(self):
"""Boolean if volume is muted."""
return self._override_or_child_attr(ATTR_MEDIA_VOLUME_MUTED) in [True, STATE_ON]
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._child_attr(ATTR_MEDIA_CONTENT_ID)
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return self._child_attr(ATTR_MEDIA_CONTENT_TYPE)
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._child_attr(ATTR_MEDIA_DURATION)
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._child_attr(ATTR_ENTITY_PICTURE)
@property
def entity_picture(self):
"""
Return image of the media playing.
The universal media player doesn't use the parent class logic, since
the url is coming from child entity pictures which have already been
sent through the API proxy.
"""
return self.media_image_url
@property
def media_title(self):
"""Title of current playing media."""
return self._child_attr(ATTR_MEDIA_TITLE)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ARTIST)
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ALBUM_NAME)
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ALBUM_ARTIST)
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_TRACK)
@property
def media_series_title(self):
"""Return the title of the series of current playing media (TV)."""
return self._child_attr(ATTR_MEDIA_SERIES_TITLE)
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
return self._child_attr(ATTR_MEDIA_SEASON)
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
return self._child_attr(ATTR_MEDIA_EPISODE)
@property
def media_channel(self):
"""Channel currently playing."""
return self._child_attr(ATTR_MEDIA_CHANNEL)
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return self._child_attr(ATTR_MEDIA_PLAYLIST)
@property
def app_id(self):
"""ID of the current running app."""
return self._child_attr(ATTR_APP_ID)
@property
def app_name(self):
"""Name of the current running app."""
return self._child_attr(ATTR_APP_NAME)
@property
def source(self):
"""Return the current input source of the device."""
return self._override_or_child_attr(ATTR_INPUT_SOURCE)
@property
def source_list(self):
"""List of available input sources."""
return self._override_or_child_attr(ATTR_INPUT_SOURCE_LIST)
@property
def shuffle(self):
"""Boolean if shuffling is enabled."""
return self._override_or_child_attr(ATTR_MEDIA_SHUFFLE)
@property
def supported_features(self):
"""Flag media player features that are supported."""
flags = self._child_attr(ATTR_SUPPORTED_FEATURES) or 0
if SERVICE_TURN_ON in self._cmds:
flags |= SUPPORT_TURN_ON
if SERVICE_TURN_OFF in self._cmds:
flags |= SUPPORT_TURN_OFF
        if any(cmd in self._cmds for cmd in (SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN)):
flags |= SUPPORT_VOLUME_STEP
if SERVICE_VOLUME_SET in self._cmds:
flags |= SUPPORT_VOLUME_SET
if SERVICE_VOLUME_MUTE in self._cmds and ATTR_MEDIA_VOLUME_MUTED in self._attrs:
flags |= SUPPORT_VOLUME_MUTE
if SERVICE_SELECT_SOURCE in self._cmds:
flags |= SUPPORT_SELECT_SOURCE
if SERVICE_CLEAR_PLAYLIST in self._cmds:
flags |= SUPPORT_CLEAR_PLAYLIST
if SERVICE_SHUFFLE_SET in self._cmds and ATTR_MEDIA_SHUFFLE in self._attrs:
flags |= SUPPORT_SHUFFLE_SET
return flags
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
active_child = self._child_state
return {ATTR_ACTIVE_CHILD: active_child.entity_id} if active_child else {}
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._child_attr(ATTR_MEDIA_POSITION)
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._child_attr(ATTR_MEDIA_POSITION_UPDATED_AT)
async def async_turn_on(self):
"""Turn the media player on."""
await self._async_call_service(SERVICE_TURN_ON, allow_override=True)
async def async_turn_off(self):
"""Turn the media player off."""
await self._async_call_service(SERVICE_TURN_OFF, allow_override=True)
async def async_mute_volume(self, mute):
"""Mute the volume."""
data = {ATTR_MEDIA_VOLUME_MUTED: mute}
await self._async_call_service(SERVICE_VOLUME_MUTE, data, allow_override=True)
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
await self._async_call_service(SERVICE_VOLUME_SET, data, allow_override=True)
async def async_media_play(self):
"""Send play command."""
await self._async_call_service(SERVICE_MEDIA_PLAY)
async def async_media_pause(self):
"""Send pause command."""
await self._async_call_service(SERVICE_MEDIA_PAUSE)
async def async_media_stop(self):
"""Send stop command."""
await self._async_call_service(SERVICE_MEDIA_STOP)
async def async_media_previous_track(self):
"""Send previous track command."""
await self._async_call_service(SERVICE_MEDIA_PREVIOUS_TRACK)
async def async_media_next_track(self):
"""Send next track command."""
await self._async_call_service(SERVICE_MEDIA_NEXT_TRACK)
async def async_media_seek(self, position):
"""Send seek command."""
data = {ATTR_MEDIA_SEEK_POSITION: position}
await self._async_call_service(SERVICE_MEDIA_SEEK, data)
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
await self._async_call_service(SERVICE_PLAY_MEDIA, data)
async def async_volume_up(self):
"""Turn volume up for media player."""
await self._async_call_service(SERVICE_VOLUME_UP, allow_override=True)
async def async_volume_down(self):
"""Turn volume down for media player."""
await self._async_call_service(SERVICE_VOLUME_DOWN, allow_override=True)
async def async_media_play_pause(self):
"""Play or pause the media player."""
await self._async_call_service(SERVICE_MEDIA_PLAY_PAUSE)
async def async_select_source(self, source):
"""Set the input source."""
data = {ATTR_INPUT_SOURCE: source}
await self._async_call_service(SERVICE_SELECT_SOURCE, data, allow_override=True)
async def async_clear_playlist(self):
"""Clear players playlist."""
await self._async_call_service(SERVICE_CLEAR_PLAYLIST)
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffling."""
data = {ATTR_MEDIA_SHUFFLE: shuffle}
await self._async_call_service(SERVICE_SHUFFLE_SET, data, allow_override=True)
async def async_update(self):
"""Update state in HA."""
for child_name in self._children:
child_state = self.hass.states.get(child_name)
if child_state and child_state.state not in OFF_STATES:
self._child_state = child_state
return
self._child_state = None
| apache-2.0 |
Cloudify-PS/cloudify-manager-blueprints | components/nginx/scripts/preconfigure.py | 1 | 4140 | #!/usr/bin/env python
# source: nginx -> target: manager_configuration
from os.path import join, dirname
from collections import namedtuple
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
src_runtime_props = ctx.source.instance.runtime_properties
NGINX_SERVICE_NAME = src_runtime_props['service_name']
CONFIG_PATH = 'components/{0}/config'.format(NGINX_SERVICE_NAME)
def _deploy_nginx_config_files(external_rest_protocol):
resource = namedtuple('Resource', 'src dst')
ctx.logger.info('Deploying Nginx configuration files...')
resources = [
resource(
src='{0}/{1}-external-rest-server.cloudify'.format(
CONFIG_PATH,
external_rest_protocol
),
dst='/etc/nginx/conf.d/{0}-external-rest-server.cloudify'.format(
external_rest_protocol
)
),
resource(
src='{0}/https-internal-rest-server.cloudify'.format(
CONFIG_PATH
),
dst='/etc/nginx/conf.d/https-internal-rest-server.cloudify'
),
resource(
src='{0}/https-file-server.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/https-file-server.cloudify'
),
resource(
src='{0}/nginx.conf'.format(CONFIG_PATH),
dst='/etc/nginx/nginx.conf'
),
resource(
src='{0}/default.conf'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/default.conf',
),
resource(
src='{0}/rest-location.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/rest-location.cloudify',
),
resource(
src='{0}/fileserver-location.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/fileserver-location.cloudify',
),
resource(
src='{0}/redirect-to-fileserver.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/redirect-to-fileserver.cloudify',
),
resource(
src='{0}/ui-locations.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/ui-locations.cloudify',
),
resource(
src='{0}/logs-conf.cloudify'.format(CONFIG_PATH),
dst='/etc/nginx/conf.d/logs-conf.cloudify',
)
]
for resource in resources:
utils.deploy_blueprint_resource(
resource.src,
resource.dst,
NGINX_SERVICE_NAME,
load_ctx=False
)
def preconfigure_nginx():
target_runtime_props = ctx.target.instance.runtime_properties
# This is used by nginx's default.conf to select the relevant configuration
external_rest_protocol = target_runtime_props['external_rest_protocol']
internal_cert_path, internal_key_path = utils.generate_internal_ssl_cert(
target_runtime_props['internal_rest_host']
)
src_runtime_props['external_rest_protocol'] = external_rest_protocol
src_runtime_props['internal_cert_path'] = internal_cert_path
src_runtime_props['internal_key_path'] = internal_key_path
src_runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME
    # Pass on the path to the certificate to manager_configuration
target_runtime_props['internal_cert_path'] = internal_cert_path
if external_rest_protocol == 'https':
external_cert_path, external_key_path = \
utils.deploy_or_generate_external_ssl_cert(
target_runtime_props['external_rest_host']
)
src_runtime_props['external_cert_path'] = external_cert_path
src_runtime_props['external_key_path'] = external_key_path
# The public cert content is used in the outputs later
external_rest_cert_content = utils.get_file_content(external_cert_path)
target_runtime_props['external_rest_cert_content'] = \
external_rest_cert_content
_deploy_nginx_config_files(external_rest_protocol)
utils.systemd.enable(NGINX_SERVICE_NAME, append_prefix=False)
preconfigure_nginx()
| apache-2.0 |
toenuff/treadmill | lib/python/treadmill/zkutils.py | 1 | 18255 | """Treadmill ZooKeeper helper functions."""
from __future__ import absolute_import
import sys
import os
import fnmatch
import importlib
import logging
import pickle
import threading
import types
import kazoo
import kazoo.client
import kazoo.exceptions
import kazoo.security
from kazoo.protocol import states
import yaml
from . import userutil
from . import utils
from . import sysinfo
from . import trace
_LOGGER = logging.getLogger(__name__)
logging.getLogger('kazoo.client').setLevel(logging.WARNING)
# Maximum time zkclient.start() will wait for the initial connection, i.e. 30 sec.
ZK_MAX_CONNECTION_START_TIMEOUT = 30
try:
_ZK_PLUGIN_MOD = importlib.import_module('treadmill.plugins.zookeeper')
except ImportError:
_ZK_PLUGIN_MOD = None
def _is_valid_perm(perm):
"""Check string to be valid permission spec."""
for char in perm:
if char not in 'rwcda':
return False
return True
def make_user_acl(user, perm):
"""Constructs an ACL based on user and permissions.
ACL properties:
- schema: kerberos
- principal: user://<user>
"""
assert _is_valid_perm(perm)
if _ZK_PLUGIN_MOD and hasattr(_ZK_PLUGIN_MOD, 'make_user_acl'):
return _ZK_PLUGIN_MOD.make_user_acl(user, perm)
else:
return make_anonymous_acl(perm)
def make_role_acl(role, perm):
"""Constructs a file based acl based on role.
    Role files are assumed to be in the /treadmill/roles directory.
    The Treadmill master runs in a chrooted environment, so the path to the
    role files is hardcoded.
"""
assert _is_valid_perm(perm)
if _ZK_PLUGIN_MOD and hasattr(_ZK_PLUGIN_MOD, 'make_role_acl'):
return _ZK_PLUGIN_MOD.make_role_acl(role, perm)
else:
return make_anonymous_acl(perm)
def make_host_acl(host, perm):
"""Constucts acl for the current user.
Given the host as the principal.
"""
assert _is_valid_perm(perm)
if _ZK_PLUGIN_MOD and hasattr(_ZK_PLUGIN_MOD, 'make_host_acl'):
return _ZK_PLUGIN_MOD.make_host_acl(host, perm)
else:
return make_user_acl('host/{0}'.format(host), perm)
def make_self_acl(perm):
"""Constucts acl for the current user.
If the user is root, use host/<hostname> principal.
"""
assert _is_valid_perm(perm)
if userutil.is_root():
return make_host_acl(sysinfo.hostname(), perm)
user = userutil.get_current_username()
return make_user_acl(user, perm)
def make_anonymous_acl(perm):
"""Constructs anonymous (world) acl."""
if not perm:
perm = 'r'
assert _is_valid_perm(perm)
return kazoo.security.make_acl('world', 'anyone',
read='r' in perm,
write='w' in perm,
create='c' in perm,
delete='d' in perm,
admin='a' in perm)
def make_default_acl(acls):
"""Constructs a default Treadmill acl."""
realacl = [
make_role_acl('readers', 'r'),
make_role_acl('admin', 'rwcda'),
make_self_acl('rwcda'),
]
if acls:
realacl.extend(acls)
return realacl
@trace.disable
def make_safe_create(zkclient):
"""Makes a wrapper for kazoo.client.create enforcing default acl."""
_create = zkclient.create
def safe_create(self_unused, path, value='', acl=None, ephemeral=False,
sequence=False, makepath=False):
"""Safe wrapper around kazoo.client.create"""
return _create(path, value=value, acl=make_default_acl(acl),
ephemeral=ephemeral, sequence=sequence,
makepath=makepath)
return safe_create
@trace.disable
def make_safe_ensure_path(zkclient):
"""Makes a wrapper for kazoo.client.ensure_path enforcing default acl."""
ensure_path = zkclient.ensure_path
def safe_ensure_path(self_unused, path, acl=None):
"""Safe wrapper around kazoo.client.ensure_path"""
return ensure_path(path, acl=make_default_acl(acl))
return safe_ensure_path
@trace.disable
def make_safe_set_acls(zkclient):
"""Makes a wrapper for kazoo.client.set_acls enforcing default acl."""
set_acls = zkclient.set_acls
def safe_set_acls(self_unused, path, acls, version=-1):
"""Safe wrapper around kazoo.client.set_acls"""
return set_acls(path, make_default_acl(acls), version=version)
return safe_set_acls
def exit_on_lost(state):
"""Watch for connection events and exit if disconnected."""
_LOGGER.debug('ZK connection state: %s', state)
if state == states.KazooState.LOST:
_LOGGER.info('Exiting on ZK connection lost.')
utils.sys_exit(-1)
def exit_on_disconnect(state):
"""Watch for connection events and exit if disconnected."""
_LOGGER.debug('ZK connection state: %s', state)
if state != states.KazooState.CONNECTED:
_LOGGER.info('Exiting on ZK connection lost.')
utils.sys_exit(-1)
def exit_never(state):
"""Watch for connection state, never exit."""
_LOGGER.debug('ZK connection state: %s', state)
def disconnect(zkclient):
"""Gracefully close Zookeeper connection."""
_LOGGER.info('Closing zk connection.')
zkclient.stop()
zkclient.close()
def connect(zkurl, idpath=None, listener=None, max_tries=30,
timeout=ZK_MAX_CONNECTION_START_TIMEOUT, chroot=None):
"""Establish connection with Zk and return KazooClient.
Methods that create/modify nodes are wrapped so that default acls are
    enforced. The goal is to prevent any node from being created with the
    default acl, which grants full access to the anonymous user.
    :param max_tries:
        the maximum number of retries when trying to connect to the
        servers; default is 30.
    :param timeout:
        the maximum time, in seconds, to wait for the initial connection
        to be established.
"""
client_id = None
if idpath:
if os.path.exists(idpath):
with open(idpath, 'r') as idfile:
client_id = pickle.load(idfile)
zkclient = connect_native(zkurl, client_id=client_id, listener=listener,
timeout=timeout, max_tries=max_tries,
chroot=chroot)
if idpath:
client_id = zkclient.client_id
with open(idpath, 'w+') as idfile:
pickle.dump(client_id, idfile)
zkclient.create = types.MethodType(make_safe_create(zkclient), zkclient)
zkclient.ensure_path = types.MethodType(make_safe_ensure_path(zkclient),
zkclient)
zkclient.set_acls = types.MethodType(make_safe_set_acls(zkclient),
zkclient)
return zkclient
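# Example usage sketch (the zookeeper URL and id file path are placeholders):
#
#   zkclient = connect('zookeeper://zk1:2181,zk2:2181/treadmill',
#                      idpath='/var/run/treadmill/zk.id',
#                      listener=exit_on_lost)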
def connect_native(zkurl, client_id=None, listener=None, max_tries=30,
timeout=ZK_MAX_CONNECTION_START_TIMEOUT, chroot=None):
"""Establish connection with Zk and return KazooClient."""
_LOGGER.debug('Connecting to %s', zkurl)
zkconnstr = zkurl[len('zookeeper://'):]
if zkconnstr.find('/') != -1:
# Chroot specified in connection.
assert chroot is None
chroot = zkconnstr[zkconnstr.find('/'):]
zkconnstr = zkconnstr[:zkconnstr.find('/')]
zk_retry = {
'delay': 0.2,
'backoff': 2,
'max_jitter': 0.2,
'max_delay': 1,
'max_tries': max_tries,
'ignore_expire': False,
}
connargs = {
'client_id': client_id,
'auth_data': [],
'connection_retry': zk_retry,
'command_retry': zk_retry,
}
if _ZK_PLUGIN_MOD:
zkclient = _ZK_PLUGIN_MOD.connect(zkurl, connargs)
else:
connargs['hosts'] = zkconnstr
_LOGGER.debug('Connecting to zookeeper: %r', connargs)
zkclient = kazoo.client.KazooClient(**connargs)
if listener is None:
listener = exit_on_disconnect
zkclient.add_listener(listener)
# This will CLOSE the connection and throw a time-out exception after
# trying max_tries
zkclient.start(timeout=timeout)
if chroot:
acl = make_default_acl(None)
path = []
chroot_components = chroot.split('/')
while chroot_components:
path.append(chroot_components.pop(0))
if len(path) > 1:
component = '/'.join(path)
if not zkclient.exists(component):
# TODO: need to compare acls if component exists.
try:
zkclient.create(component, '', makepath=True, acl=acl)
except kazoo.exceptions.KazooException:
_LOGGER.exception('chroot %s does not exist.', chroot)
raise
zkclient.chroot = chroot
return zkclient
class SequenceNodeWatch(object):
"""Sequential nodes watcher which keeps track of last node seen."""
def __init__(self, zkclient, func, delim, pattern, include_data):
self.zkclient = zkclient
self.func = func
self.last = None
self.delim = delim
self.pattern = pattern
self.include_data = include_data
def nodes(self, children):
"""Yield previously unseen node."""
if self.pattern:
children = [node for node in children
if node.startswith(self.pattern)]
seq_children = [(node[node.rfind(self.delim) + 1:], node)
for node in children if node.rfind(self.delim) > 0]
# Sort nodes by seq #
for seq, node in sorted(seq_children):
if seq > self.last:
self.last = seq
yield node
def invoke_callback(self, path, node):
"""Invokes callback for each new node."""
try:
fullpath = os.path.join(path, node)
data = None
stat = None
if self.include_data:
data, stat = self.zkclient.get(fullpath)
self.func(fullpath, data, stat)
# pylint: disable=W0702
except:
_LOGGER.critical("Unexpected error: %s", sys.exc_info()[0])
def on_child(self, event):
"""The watch function."""
if event.type == 'CHILD':
children = self.zkclient.get_children(event.path, self.on_child)
for node in self.nodes(children):
self.invoke_callback(event.path, node)
def watch_sequence(zkclient, path, func, delim='-', pattern=None,
include_data=False):
"""Watch sequential nodes under path, invoke function with new nodes.
When started, will invoke callback func with the list of sequence nodes,
remembering the last node.
For each node added, callback will be invoked only with newly added nodes.
Delimiter is used to identify the sequence number of the node name, which
    can be anything. Optionally, nodes are filtered to those starting with
    the given pattern.
"""
watcher = SequenceNodeWatch(zkclient, func, delim, pattern, include_data)
def on_create(event):
"""Callback invoked when node is created."""
assert event.path == path
children = zkclient.get_children(path, watcher.on_child)
for node in watcher.nodes(children):
watcher.invoke_callback(path, node)
if zkclient.exists(path, on_create):
children = zkclient.get_children(path, watcher.on_child)
for node in watcher.nodes(children):
watcher.invoke_callback(path, node)
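# Example usage sketch for watch_sequence (the path and callback below are
# placeholders):
#
#   def _on_task(fullpath, data, stat):
#       _LOGGER.info('new sequence node: %s', fullpath)
#
#   watch_sequence(zkclient, '/tasks', _on_task, include_data=True)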
def _payload(data=None):
"""Converts payload to yaml."""
payload = ''
if data is not None:
if isinstance(data, str) or isinstance(data, unicode):
payload = data
else:
payload = yaml.dump(data)
return payload
def create(zkclient, path, data=None, acl=None, sequence=False,
default_acl=True, ephemeral=False):
"""Serialize data into Zk node, fail if node exists."""
payload = _payload(data)
if default_acl:
realacl = make_default_acl(acl)
else:
realacl = acl
return zkclient.create(path, payload, makepath=True, acl=realacl,
sequence=sequence, ephemeral=ephemeral)
def put(zkclient, path, data=None, acl=None, sequence=False, default_acl=True,
ephemeral=False, check_content=False):
"""Serialize data into Zk node, converting data to YAML.
Default acl is set to admin:all, anonymous:readonly. These acls are
    appended to any additional acls provided in the argument.
"""
payload = _payload(data)
# Default acl assumes world readable data, safe to log the payload. If
# default acl is not specified, do not log the payload as it may be
# private.
if default_acl:
realacl = make_default_acl(acl)
_LOGGER.debug('put (default_acl=%s): %s acl=%s seq=%s', default_acl,
path, realacl, sequence)
else:
realacl = acl
_LOGGER.debug('put %s *** acl=%s seq=%s', path, realacl, sequence)
try:
return zkclient.create(path, payload, makepath=True, acl=realacl,
sequence=sequence, ephemeral=ephemeral)
except kazoo.client.NodeExistsError:
# This will never happen for sequence node, so requestor knows the
# path.
#
        # If there is no change, return None to indicate the update was not done.
if check_content:
current, _metadata = zkclient.get(path)
if current == payload:
_LOGGER.debug('%s is up to date', path)
return None
zkclient.set(path, payload)
_LOGGER.debug('Setting ACL on %s to %r', path, realacl)
zkclient.set_acls(path, realacl)
return path
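# Example put/get round-trip sketch (node path and payload are placeholders):
#
#   put(zkclient, '/scheduled/foo.bar', {'memory': '100M'}, check_content=True)
#   app = get(zkclient, '/scheduled/foo.bar')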
def update(zkclient, path, data, check_content=False):
"""Set data into Zk node, converting data to YAML."""
_LOGGER.debug('update %s', path)
payload = _payload(data)
if check_content:
current, _metadata = zkclient.get(path)
if current == payload:
return None
zkclient.set(path, payload)
return path
def get(zkclient, path, watcher=None, strict=True, need_metadata=False):
"""Read content of Zookeeper node and return YAML parsed object."""
data, metadata = zkclient.get(path, watch=watcher)
result = None
if data is not None:
try:
result = yaml.load(data)
except yaml.YAMLError:
if strict:
raise
else:
result = data
if need_metadata:
return result, metadata
else:
return result
def get_default(zkclient, path, watcher=None, strict=True, need_metadata=False,
default=None):
"""Read content of Zookeeper node, return default value if does not exist.
"""
try:
return get(zkclient, path,
watcher=watcher, strict=strict, need_metadata=need_metadata)
except kazoo.client.NoNodeError:
if need_metadata:
return default, None
else:
return default
def get_children_count(zkclient, path, exc_safe=True):
"""Gets the node children count."""
try:
_data, metadata = zkclient.get(path)
return metadata.children_count
except kazoo.client.NoNodeError:
if exc_safe:
return 0
else:
raise
def ensure_exists(zkclient, path, acl=None, sequence=False, data=None):
"""Creates path with correct ACL if path does not exist.
If the path does not exist, creates the path with proper acl.
If the path already exists, does not touch the content, but makes sure the
acl is correct.
"""
realacl = make_default_acl(acl)
try:
# new node has default empty data
newdata = _payload(data)
return zkclient.create(path, newdata, makepath=True, acl=realacl,
sequence=sequence)
except kazoo.client.NodeExistsError:
# if data not provided, we keep original data pristine
if data is not None:
newdata = _payload(data)
zkclient.set(path, newdata)
zkclient.set_acls(path, realacl)
return path
def ensure_deleted(zkclient, path, recursive=True):
"""Deletes the node if it exists."""
try:
_LOGGER.debug('Deleting %s', path)
if recursive:
for child in zkclient.get_children(path):
ensure_deleted(zkclient, os.path.join(path, child))
zkclient.delete(path)
except kazoo.client.NoNodeError:
_LOGGER.debug('Node %s does not exist.', path)
def exists(zk_client, zk_path, timeout=60):
"""wrapping the zk exists function with timeout"""
node_created_event = threading.Event()
def node_watch(event):
"""watch for node creation"""
if event.type == kazoo.protocol.states.EventType.CREATED:
node_created_event.set()
if not zk_client.exists(zk_path, watch=node_watch):
return node_created_event.wait(timeout)
return True
def list_match(zkclient, path, pattern, watcher=None):
"""Get a list of nodes matching pattern."""
children = zkclient.get_children(path, watch=watcher)
return [node for node in children if fnmatch.fnmatch(node, pattern)]
def wait(zk_client, zk_path, wait_exists, timeout=None):
"""Wait for node to be in a given state."""
node_created_event = threading.Event()
def node_watch(event):
"""watch for node events."""
_LOGGER.debug('Got event: %r', event)
created = (event.type == kazoo.protocol.states.EventType.CREATED)
deleted = (event.type == kazoo.protocol.states.EventType.DELETED)
if (wait_exists and created) or (not wait_exists and deleted):
node_created_event.set()
if wait_exists == bool(zk_client.exists(zk_path, watch=node_watch)):
return True
else:
_LOGGER.debug('Will wait for timeout: %r', timeout)
return node_created_event.wait(timeout)
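# Example usage sketch (the node path and timeouts are placeholders):
#
#   if exists(zkclient, '/ready', timeout=30):
#       wait(zkclient, '/ready', wait_exists=False, timeout=30)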
def with_retry(func, *args, **kwargs):
"""Calls function with retry."""
zk_retry = kazoo.retry.KazooRetry(ignore_expire=False, max_tries=5)
return zk_retry(func, *args, **kwargs)
| apache-2.0 |
richard-willowit/odoo | addons/delivery/tests/test_delivery_cost.py | 6 | 5590 | # -*- coding: utf-8 -*-
from odoo.tests import common
from odoo.tools import float_compare
@common.at_install(False)
@common.post_install(True)
class TestDeliveryCost(common.TransactionCase):
def setUp(self):
super(TestDeliveryCost, self).setUp()
self.SaleOrder = self.env['sale.order']
self.SaleOrderLine = self.env['sale.order.line']
self.AccountAccount = self.env['account.account']
self.SaleConfigSetting = self.env['res.config.settings']
self.Product = self.env['product.product']
self.partner_18 = self.env.ref('base.res_partner_18')
self.pricelist = self.env.ref('product.list0')
self.product_4 = self.env.ref('product.product_product_4')
self.product_uom_unit = self.env.ref('product.product_uom_unit')
self.normal_delivery = self.env.ref('delivery.normal_delivery_carrier')
self.partner_4 = self.env.ref('base.res_partner_4')
self.partner_address_13 = self.env.ref('base.res_partner_address_13')
self.product_uom_hour = self.env.ref('product.product_uom_hour')
self.account_data = self.env.ref('account.data_account_type_revenue')
self.account_tag_operating = self.env.ref('account.account_tag_operating')
self.product_2 = self.env.ref('product.product_product_2')
self.product_category = self.env.ref('product.product_category_all')
self.free_delivery = self.env.ref('delivery.free_delivery_carrier')
# as the tests hereunder assume all the prices in USD, we must ensure
# that the company actually uses USD
self.env.user.company_id.write({'currency_id': self.env.ref('base.USD').id})
def test_00_delivery_cost(self):
# In order to test Carrier Cost
# Create sales order with Normal Delivery Charges
self.sale_normal_delivery_charges = self.SaleOrder.create({
'partner_id': self.partner_18.id,
'partner_invoice_id': self.partner_18.id,
'partner_shipping_id': self.partner_18.id,
'pricelist_id': self.pricelist.id,
'order_line': [(0, 0, {
'name': 'PC Assamble + 2GB RAM',
'product_id': self.product_4.id,
'product_uom_qty': 1,
'product_uom': self.product_uom_unit.id,
'price_unit': 750.00,
})],
'carrier_id': self.normal_delivery.id
})
        # I create a sales account and a service product used by the test
self.a_sale = self.AccountAccount.create({
'code': 'X2020',
'name': 'Product Sales - (test)',
'user_type_id': self.account_data.id,
'tag_ids': [(6, 0, {
self.account_tag_operating.id
})]
})
self.product_consultant = self.Product.create({
'sale_ok': True,
'list_price': 75.0,
'standard_price': 30.0,
'uom_id': self.product_uom_hour.id,
'uom_po_id': self.product_uom_hour.id,
'name': 'Service',
'categ_id': self.product_category.id,
'type': 'service'
})
# I add delivery cost in Sales order
self.sale_normal_delivery_charges.get_delivery_price()
self.sale_normal_delivery_charges.set_delivery_line()
        # I check sales order after adding delivery cost
line = self.SaleOrderLine.search([('order_id', '=', self.sale_normal_delivery_charges.id),
('product_id', '=', self.sale_normal_delivery_charges.carrier_id.product_id.id)])
self.assertEqual(len(line), 1, "Delivery cost is not Added")
self.assertEqual(float_compare(line.price_subtotal, 10.0, precision_digits=2), 0,
"Delivery cost is not correspond.")
# I confirm the sales order
self.sale_normal_delivery_charges.action_confirm()
# Create one more sales order with Free Delivery Charges
self.delivery_sale_order_cost = self.SaleOrder.create({
'partner_id': self.partner_4.id,
'partner_invoice_id': self.partner_address_13.id,
'partner_shipping_id': self.partner_address_13.id,
'pricelist_id': self.pricelist.id,
'order_line': [(0, 0, {
'name': 'Service on demand',
'product_id': self.product_consultant.id,
'product_uom_qty': 24,
'product_uom': self.product_uom_hour.id,
'price_unit': 75.00,
}), (0, 0, {
'name': 'On Site Assistance',
'product_id': self.product_2.id,
'product_uom_qty': 30,
'product_uom': self.product_uom_hour.id,
'price_unit': 38.25,
})],
'carrier_id': self.free_delivery.id
})
# I add free delivery cost in Sales order
self.delivery_sale_order_cost.get_delivery_price()
self.delivery_sale_order_cost.set_delivery_line()
# I check sales order after adding delivery cost
line = self.SaleOrderLine.search([('order_id', '=', self.delivery_sale_order_cost.id),
('product_id', '=', self.delivery_sale_order_cost.carrier_id.product_id.id)])
        self.assertEqual(len(line), 1, "Delivery cost line was not added")
        self.assertEqual(float_compare(line.price_subtotal, 0, precision_digits=2), 0,
            "Delivery cost does not correspond.")
# I set default delivery policy
self.default_delivery_policy = self.SaleConfigSetting.create({})
self.default_delivery_policy.execute()
| gpl-3.0 |
vitorio/ocropodium | ocradmin/presets/management/commands/importscripts.py | 1 | 3008 | """
Import a script file or files into the database.
"""
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from ocradmin.presets.models import Preset
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
class Command(BaseCommand):
args = "<scriptfile1> ... <scriptfileN>"
help = "Import JSON scripts into the presets database"
option_list = BaseCommand.option_list + (
make_option(
"-d",
"--description",
action="store",
type="string",
dest="description",
default="",
help="Description for the given preset"),
make_option(
"-n",
"--name",
action="store",
type="string",
dest="name",
help="Name of the preset. If not given, inferred from the file name"),
make_option(
"-t",
"--tags",
action="store",
type="string",
dest="tags",
default="",
help="Comma separated list of tags"),
)
def handle(self, *args, **options):
try:
adminuser = User.objects.get(is_superuser=True)
except User.DoesNotExist:
raise ImproperlyConfigured(
"An admin user must exist before presets can be imported.")
if not args:
raise CommandError("Scripts to import must be given as arguments.")
for f in args:
if not os.path.exists(f):
raise CommandError("Script file does not exist: %s" % f)
name = options.get("name")
if name is None:
name = os.path.splitext(os.path.basename(f))[0].capitalize()
if name.strip() == "":
raise CommandError("Script must have a valid name")
tags = " ".join([t.strip() for t in options.get("tags", "").split(",")])
description = options.get("description")
with open(f) as fh:
data = fh.read()
try:
script = json.loads(data)
meta = script.get("__meta")
if meta is not None:
name = meta.get("name", name)
description = meta.get("description", options.get("description"))
tags = meta.get("tags", tags)
except json.JSONDecodeError, err:
raise CommandError("Invalid script: JSON data could not be decoded.")
p = Preset(
name=name,
user=adminuser,
tags=tags,
description=description,
data=data,
)
p.save()
self.stdout.write("Successfully import preset: %s\n" % name)
| apache-2.0 |
p4datasystems/CarnotKEdist | dist/Lib/test/test_binop.py | 126 | 10743 | """Tests for binary operators on subtypes of built-in types."""
import unittest
from test import test_support
def gcd(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
def isint(x):
"""Test whether an object is an instance of int or long."""
return isinstance(x, int) or isinstance(x, long)
def isnum(x):
"""Test whether an object is an instance of a built-in numeric type."""
for T in int, long, float, complex:
if isinstance(x, T):
return 1
return 0
def isRat(x):
"""Test wheter an object is an instance of the Rat class."""
return isinstance(x, Rat)
class Rat(object):
"""Rational number implemented as a normalized pair of longs."""
__slots__ = ['_Rat__num', '_Rat__den']
def __init__(self, num=0L, den=1L):
"""Constructor: Rat([num[, den]]).
The arguments must be ints or longs, and default to (0, 1)."""
if not isint(num):
raise TypeError, "Rat numerator must be int or long (%r)" % num
if not isint(den):
raise TypeError, "Rat denominator must be int or long (%r)" % den
# But the zero is always on
if den == 0:
raise ZeroDivisionError, "zero denominator"
g = gcd(den, num)
self.__num = long(num//g)
self.__den = long(den//g)
def _get_num(self):
"""Accessor function for read-only 'num' attribute of Rat."""
return self.__num
num = property(_get_num, None)
def _get_den(self):
"""Accessor function for read-only 'den' attribute of Rat."""
return self.__den
den = property(_get_den, None)
def __repr__(self):
"""Convert a Rat to an string resembling a Rat constructor call."""
return "Rat(%d, %d)" % (self.__num, self.__den)
def __str__(self):
"""Convert a Rat to a string resembling a decimal numeric value."""
return str(float(self))
def __float__(self):
"""Convert a Rat to a float."""
return self.__num*1.0/self.__den
def __int__(self):
"""Convert a Rat to an int; self.den must be 1."""
if self.__den == 1:
try:
return int(self.__num)
except OverflowError:
raise OverflowError, ("%s too large to convert to int" %
repr(self))
raise ValueError, "can't convert %s to int" % repr(self)
def __long__(self):
"""Convert a Rat to an long; self.den must be 1."""
if self.__den == 1:
return long(self.__num)
raise ValueError, "can't convert %s to long" % repr(self)
def __add__(self, other):
"""Add two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den + other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) + other
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two Rats, or a Rat and a number."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(self.__num*other.__den - other.__num*self.__den,
self.__den*other.__den)
if isnum(other):
return float(self) - other
return NotImplemented
def __rsub__(self, other):
"""Subtract two Rats, or a Rat and a number (reversed args)."""
if isint(other):
other = Rat(other)
if isRat(other):
return Rat(other.__num*self.__den - self.__num*other.__den,
self.__den*other.__den)
if isnum(other):
return other - float(self)
return NotImplemented
def __mul__(self, other):
"""Multiply two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__num, self.__den*other.__den)
if isint(other):
return Rat(self.__num*other, self.__den)
if isnum(other):
return float(self)*other
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
"""Divide two Rats, or a Rat and a number."""
if isRat(other):
return Rat(self.__num*other.__den, self.__den*other.__num)
if isint(other):
return Rat(self.__num, self.__den*other)
if isnum(other):
return float(self) / other
return NotImplemented
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide two Rats, or a Rat and a number (reversed args)."""
if isRat(other):
return Rat(other.__num*self.__den, other.__den*self.__num)
if isint(other):
return Rat(other*self.__den, self.__num)
if isnum(other):
return other / float(self)
return NotImplemented
__rdiv__ = __rtruediv__
def __floordiv__(self, other):
"""Divide two Rats, returning the floored result."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self/other
return x.__num // x.__den
def __rfloordiv__(self, other):
"""Divide two Rats, returning the floored result (reversed args)."""
x = other/self
return x.__num // x.__den
def __divmod__(self, other):
"""Divide two Rats, returning quotient and remainder."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
x = self//other
return (x, self - other * x)
def __rdivmod__(self, other):
"""Divide two Rats, returning quotient and remainder (reversed args)."""
if isint(other):
other = Rat(other)
elif not isRat(other):
return NotImplemented
return divmod(other, self)
def __mod__(self, other):
"""Take one Rat modulo another."""
return divmod(self, other)[1]
def __rmod__(self, other):
"""Take one Rat modulo another (reversed args)."""
return divmod(other, self)[1]
def __eq__(self, other):
"""Compare two Rats for equality."""
if isint(other):
return self.__den == 1 and self.__num == other
if isRat(other):
return self.__num == other.__num and self.__den == other.__den
if isnum(other):
return float(self) == other
return NotImplemented
def __ne__(self, other):
"""Compare two Rats for inequality."""
return not self == other
# Silence Py3k warning
__hash__ = None
class RatTestCase(unittest.TestCase):
"""Unit tests for Rat class and its support utilities."""
def test_gcd(self):
self.assertEqual(gcd(10, 12), 2)
self.assertEqual(gcd(10, 15), 5)
self.assertEqual(gcd(10, 11), 1)
self.assertEqual(gcd(100, 15), 5)
self.assertEqual(gcd(-10, 2), -2)
self.assertEqual(gcd(10, -2), 2)
self.assertEqual(gcd(-10, -2), -2)
for i in range(1, 20):
for j in range(1, 20):
self.assertTrue(gcd(i, j) > 0)
self.assertTrue(gcd(-i, j) < 0)
self.assertTrue(gcd(i, -j) > 0)
self.assertTrue(gcd(-i, -j) < 0)
def test_constructor(self):
a = Rat(10, 15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10L, 15L)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(10, -15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, 15)
self.assertEqual(a.num, -2)
self.assertEqual(a.den, 3)
a = Rat(-10, -15)
self.assertEqual(a.num, 2)
self.assertEqual(a.den, 3)
a = Rat(7)
self.assertEqual(a.num, 7)
self.assertEqual(a.den, 1)
try:
a = Rat(1, 0)
except ZeroDivisionError:
pass
else:
self.fail("Rat(1, 0) didn't raise ZeroDivisionError")
for bad in "0", 0.0, 0j, (), [], {}, None, Rat, unittest:
try:
a = Rat(bad)
except TypeError:
pass
else:
self.fail("Rat(%r) didn't raise TypeError" % bad)
try:
a = Rat(1, bad)
except TypeError:
pass
else:
self.fail("Rat(1, %r) didn't raise TypeError" % bad)
def test_add(self):
self.assertEqual(Rat(2, 3) + Rat(1, 3), 1)
self.assertEqual(Rat(2, 3) + 1, Rat(5, 3))
self.assertEqual(1 + Rat(2, 3), Rat(5, 3))
self.assertEqual(1.0 + Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) + 1.0, 1.5)
def test_sub(self):
self.assertEqual(Rat(7, 2) - Rat(7, 5), Rat(21, 10))
self.assertEqual(Rat(7, 5) - 1, Rat(2, 5))
self.assertEqual(1 - Rat(3, 5), Rat(2, 5))
self.assertEqual(Rat(3, 2) - 1.0, 0.5)
self.assertEqual(1.0 - Rat(1, 2), 0.5)
def test_mul(self):
self.assertEqual(Rat(2, 3) * Rat(5, 7), Rat(10, 21))
self.assertEqual(Rat(10, 3) * 3, 10)
self.assertEqual(3 * Rat(10, 3), 10)
self.assertEqual(Rat(10, 5) * 0.5, 1.0)
self.assertEqual(0.5 * Rat(10, 5), 1.0)
def test_div(self):
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
def test_floordiv(self):
self.assertEqual(Rat(10) // Rat(4), 2)
self.assertEqual(Rat(10, 3) // Rat(4, 3), 2)
self.assertEqual(Rat(10) // 4, 2)
self.assertEqual(10 // Rat(4), 2)
def test_eq(self):
self.assertEqual(Rat(10), Rat(20, 2))
self.assertEqual(Rat(10), 10)
self.assertEqual(10, Rat(10))
self.assertEqual(Rat(10), 10.0)
self.assertEqual(10.0, Rat(10))
def test_future_div(self):
exec future_test
# XXX Ran out of steam; TO DO: divmod, div, future division
future_test = """
from __future__ import division
self.assertEqual(Rat(10, 3) / Rat(5, 7), Rat(14, 3))
self.assertEqual(Rat(10, 3) / 3, Rat(10, 9))
self.assertEqual(2 / Rat(5), Rat(2, 5))
self.assertEqual(3.0 * Rat(1, 2), 1.5)
self.assertEqual(Rat(1, 2) * 3.0, 1.5)
self.assertEqual(eval('1/2'), 0.5)
"""
def test_main():
test_support.run_unittest(RatTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
guewen/OpenUpgrade | addons/google_account/__openerp__.py | 68 | 1459 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Users',
'version': '1.0',
'category': 'Tools',
'description': """
This module adds the Google user in res.users.
================================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base_setup'],
'data': [
'google_account_data.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cntnboys/410Lab6 | build/django/django/db/models/sql/datastructures.py | 71 | 2522 | """
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class Col(object):
def __init__(self, alias, target, source):
self.alias, self.target, self.source = alias, target, source
def as_sql(self, qn, connection):
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
@property
def output_field(self):
return self.source
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.source)
def get_group_by_cols(self):
return [(self.alias, self.target.column)]
def get_lookup(self, name):
return self.output_field.get_lookup(name)
def get_transform(self, name):
return self.output_field.get_transform(name)
def prepare(self):
return self
class EmptyResultSet(Exception):
pass
class MultiJoin(Exception):
"""
Used by join construction code to indicate the point at which a
multi-valued join was attempted (if the caller wants to treat that
exceptionally).
"""
def __init__(self, names_pos, path_with_names):
self.level = names_pos
# The path travelled, this includes the path to the multijoin.
self.names_with_path = path_with_names
class Empty(object):
pass
class Date(object):
"""
Add a date selection column.
"""
def __init__(self, col, lookup_type):
self.col = col
self.lookup_type = lookup_type
def relabeled_clone(self, change_map):
return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
def as_sql(self, qn, connection):
if isinstance(self.col, (list, tuple)):
col = '%s.%s' % tuple(qn(c) for c in self.col)
else:
col = self.col
return connection.ops.date_trunc_sql(self.lookup_type, col), []
class DateTime(object):
"""
Add a datetime selection column.
"""
def __init__(self, col, lookup_type, tzname):
self.col = col
self.lookup_type = lookup_type
self.tzname = tzname
def relabeled_clone(self, change_map):
return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
def as_sql(self, qn, connection):
if isinstance(self.col, (list, tuple)):
col = '%s.%s' % tuple(qn(c) for c in self.col)
else:
col = self.col
return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname)
| apache-2.0 |
songmonit/CTTMSONLINE | addons/survey/wizard/__init__.py | 385 | 1026 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
| agpl-3.0 |
tdliu/hoop-picks | lib/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
            # lower the confidence of latin1 so that other, more accurate
            # detectors can take priority.
confidence = confidence * 0.73
return confidence
| apache-2.0 |
sudheesh001/RFID-DBSync | venv/lib/python2.7/site-packages/setuptools/tests/test_upload_docs.py | 522 | 2139 | """upload_docs tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.upload_dir = os.path.join(self.dir, 'build')
os.mkdir(self.upload_dir)
# A test document.
f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
f.write("Hello world.")
f.close()
# An empty folder.
os.mkdir(os.path.join(self.upload_dir, 'empty'))
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_create_zipfile(self):
# Test to make sure zipfile creation handles common cases.
# This explicitly includes a folder containing an empty folder.
dist = Distribution()
cmd = upload_docs(dist)
cmd.upload_dir = self.upload_dir
cmd.target_dir = self.upload_dir
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, 'foo.zip')
try:
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
zip_file = zipfile.ZipFile(tmp_file) # woh...
assert zip_file.namelist() == ['index.html']
zip_file.close()
finally:
shutil.rmtree(tmp_dir)
| gpl-2.0 |