#!/usr/bin/python
# File created on Nov 27 Jan 2012
from __future__ import division
__author__ = "Kishori M Konwar, Niels W. Hanson"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
import sys
try:
    import re
    from optparse import OptionParser, OptionGroup
except ImportError:
    print """ Could not load some user defined module functions"""
    print """ Make sure you typed \"source MetaPathwayrc\""""
    print """ """
    sys.exit(3)
script_name = sys.argv[0]
usage= script_name + """ --subsys_file subsys_file --subsys2peg subsys2peg_file -o output"""
parser = OptionParser(usage)
parser.add_option( "--subsys_file", dest="subsys_file",
help='the subsys.txt file, where the subsystems are listed')
parser.add_option( "--subsys2peg", dest="subsys2peg_file",
help='the subsystem to peg file')
parser.add_option( "-o", dest="output_file",
help='the output file')
def fprintf(file, fmt, *args):
file.write(fmt % args)
def check_arguments(opts, args):
if opts.subsys_file == None:
print """Must have the \"subsys.txt\" file"""
return False
if opts.subsys2peg_file == None:
print """Must have the \"subsys2peg\" file"""
return False
if opts.output_file == None:
print """Must have an output file"""
return False
return True
def split_attributes(attribute_str, attributes):
    rawattributes = re.split(';', attribute_str)
    for attribStr in rawattributes:
        insert_attribute(attributes, attribStr)
    return attributes
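# create_dictionary() scans a FASTA-style map file: each header line beginning
# with '>' is split into a sequence name (first word, '>' stripped) and the
# rest of the line, which is stored as that sequence's annotation.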
def create_dictionary(databasemapfile, annot_map):
seq_beg_pattern = re.compile(">")
dbmapfile = open( databasemapfile,'r')
lines=dbmapfile.readlines()
dbmapfile.close()
for line in lines:
if seq_beg_pattern.search(line):
words = line.rstrip().split()
name = words[0].replace('>','',1)
words.pop(0)
annotation = ' '.join(words)
annot_map[name]= annotation
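# process_subsys2peg_file() reads the tab-separated subsystem-to-peg table and
# fills two lookups: subsys2peg maps a subsystem name to the list of its peg
# identifiers, and peg2subsys maps each peg back to its subsystem.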
def process_subsys2peg_file(subsys2peg, peg2subsys, org_file):
    try:
        orgfile = open(org_file, 'r')
    except IOError:
        print "Cannot open " + str(org_file)
        return
    lines = orgfile.readlines()
    orgfile.close()
for line in lines:
hits = line.split('\t')
newhits = []
for hit in hits:
if hit.strip():
newhits.append(hit.strip())
if len(newhits) > 2:
if not newhits[1] in subsys2peg:
subsys2peg[newhits[1]]= []
subsys2peg[newhits[1]].append(newhits[2])
peg2subsys[newhits[2]]= newhits[1]
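# process_function_file() reads the tab-separated subsys.txt hierarchy and
# grows a tree of nested dictionaries rooted at the passed-in catalogue; each
# column of a row becomes one level of the tree.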
def process_function_file(catalogue, func_file):
    try:
        funcfile = open(func_file, 'r')
    except IOError:
        print "Cannot open " + str(func_file)
        return
    lines = funcfile.readlines()
funcfile.close()
for line in lines:
hits = line.split("\t")
newhits = []
if len(hits) > 2:
for hit in hits:
if hit.strip():
newhits.append(hit.strip())
tempcatalogue = catalogue
for i in range(0, len(newhits) ):
if not newhits[i] in tempcatalogue:
tempcatalogue[newhits[i]]= {}
tempcatalogue = tempcatalogue[newhits[i]]
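# process_catalogue_file() walks the nested catalogue dictionary recursively,
# printing each key indented by one additional tab per level of depth.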
def process_catalogue_file(catalogue, key, subsys2peg, depth):
prefixTab = ""
for i in range(0, depth):
prefixTab = prefixTab + "\t"
if catalogue[key].keys():
for skey in catalogue[key]:
print prefixTab + skey
process_catalogue_file(catalogue[key], skey, subsys2peg, depth + 1)
# else:
# if key in subsys2peg:
# for skey in subsys2peg[key]:
# print prefixTab + skey
# else:
# print prefixTab + key
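# process_whog_file() parses a COG "whog" file: lines of the form
# "[L] COGnnnn description" open a new entry (functional letter, COG id and
# description), and the "triplet: seqid ..." lines that follow attach sequence
# ids to the most recently seen COG id.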
def process_whog_file(whog_file, whog_scheme):
    square_brace = re.compile(r"\[([A-Z]*)\]\s*(\S*)\s*(.*)")
    map_pattern = re.compile(r"(.*):(.*)")
    try:
        whogfile = open(whog_file, 'r')
    except IOError:
        print "Cannot open " + str(whog_file)
        return
    lines = whogfile.readlines()
    whogfile.close()
    # a map line ("triplet: seqid ...") refers to the most recent COG line,
    # so carry the current COG fields across loop iterations
    cogid = None
    letter = None
    function = None
    for line in lines:
hits = square_brace.search(line)
isCogLine = False
if hits != None and len(hits.groups())==3:
letter= hits.group(1).strip()
cogid = hits.group(2).strip()
function = hits.group(3).strip()
isCogLine = True
if not isCogLine:
hits = map_pattern.search(line)
else:
hits = None
isMapLine = False
if hits!=None and len(hits.groups())==2:
triplet= hits.group(1).strip()
seqid= hits.group(2).strip()
isMapLine = True
if cogid!=None:
if not cogid in whog_scheme:
whog_scheme[cogid] = { "function":None, "seqmap":None, "letter":None }
whog_scheme[cogid]["function"] = function
whog_scheme[cogid]["letter"] = letter
whog_scheme[cogid]["seqmap"] = {}
if isMapLine:
seqids = [ x.strip() for x in seqid.split() ]
for s in seqids:
whog_scheme[cogid]["seqmap"][s]=triplet
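# write_output() emits one FASTA-style header line per (COG id, sequence id)
# pair, of the form ">seqid COGid # Protein_GI_number: ... # Func_class: ...
# # Function: ... # Organism: ...", falling back to "00000000" when no GI
# number is known for the sequence.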
def write_output(whog_scheme, letter_function_map, trip_organism_map, seqid_ginumber, output_file):
    try:
        outputfile = open(output_file, 'w')
    except IOError:
        print "Cannot open output file"
        return
for cogid in whog_scheme:
for seqid in whog_scheme[cogid]['seqmap']:
            output = ""
            output += ">" + seqid + " " + cogid
output+= " # Protein_GI_number: "
if seqid in seqid_ginumber:
output+= str(seqid_ginumber[seqid])
else:
output+= "00000000"
output+= " # Func_class: " + whog_scheme[cogid]["letter"] + " "
letters = list(whog_scheme[cogid]["letter"])
for x in letters:
output+=letter_function_map[x] + " "
output+=" # Function: "+ whog_scheme[cogid]["function"]
output+=" # Organism: " + trip_organism_map[whog_scheme[cogid]['seqmap'][seqid]]
            fprintf(outputfile, "%s\n", output)
outputfile.close()
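# process_ginumber_file() reads a two-column file and returns a dictionary
# mapping the first column (sequence id) to the second (GI number).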
def process_ginumber_file(myva_gb_file):
    try:
        myvagbfile = open(myva_gb_file, 'r')
    except IOError:
        print "Cannot open " + str(myva_gb_file)
        return None
    lines = myvagbfile.readlines()
myvagbfile.close()
seqid_ginumber = {}
for line in lines:
words = [x.strip() for x in line.rstrip().split() ]
if len(words)==2:
seqid_ginumber[words[0]] = words[1]
return seqid_ginumber
# the main function
def main(argv):
(opts, args) = parser.parse_args()
if not check_arguments(opts, args):
print usage
sys.exit(0)
catalogue = {}
catalogue['SEED'] = {}
process_function_file(catalogue["SEED"], opts.subsys_file)
subsys2peg = {}
peg2subsys = {}
process_subsys2peg_file(subsys2peg, peg2subsys, opts.subsys2peg_file)
process_catalogue_file(catalogue,'SEED', subsys2peg, 0)
# write_output(whog_scheme, letter_function_map, functionTriplets_organism_map, seqid_ginumber, opts.output_file)
# run main() when this script is invoked directly
if __name__ == "__main__":
main(sys.argv[1:])
# Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 Peter Portante
# Copyright (c) 2012 Iryoung Jeong
# Copyright (c) 2012 Michael Barton
# Copyright (c) 2013 Alex Gaynor
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2015 Donagh McCabe
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 John Dickinson
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Samuel Merritt
# Copyright (c) 2013 Shri Javadekar
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import itertools
import mock
import unittest
from hashlib import sha1
from time import time, strftime, gmtime
from swift.common.middleware import tempauth, tempurl
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from swift.common import utils
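# FakeApp stands in for the rest of the WSGI pipeline: it records the last
# request it saw, honors any swift.authorize callback placed in the environ,
# and otherwise replies with a canned (status, headers, body) tuple, defaulting
# to a 404 carrying a few x-test-header-* headers.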
class FakeApp(object):
def __init__(self, status_headers_body_iter=None):
self.calls = 0
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter(
itertools.repeat((
'404 Not Found', {
'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'},
'')))
self.request = None
def __call__(self, env, start_response):
self.calls += 1
self.request = Request.blank('', environ=env)
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.request)
if resp:
return resp(env, start_response)
status, headers, body = next(self.status_headers_body_iter)
return Response(status=status, headers=headers,
body=body)(env, start_response)
class TestTempURL(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.auth = tempauth.filter_factory({'reseller_prefix': ''})(self.app)
self.tempurl = tempurl.filter_factory({})(self.auth)
def _make_request(self, path, environ=None, keys=(), container_keys=None,
**kwargs):
if environ is None:
environ = {}
_junk, account, _junk, _junk = utils.split_path(path, 2, 4, True)
self._fake_cache_environ(environ, account, keys,
container_keys=container_keys)
req = Request.blank(path, environ=environ, **kwargs)
return req
def _fake_cache_environ(self, environ, account, keys, container_keys=None):
"""
Fake out the caching layer for get_account_info(). Injects account data
into environ such that keys are the tempurl keys, if set.
"""
meta = {'swash': 'buckle'}
for idx, key in enumerate(keys):
meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else ""))
if key:
meta[meta_name] = key
ic = environ.setdefault('swift.infocache', {})
ic['account/' + account] = {
'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
meta = {}
for i, key in enumerate(container_keys or []):
meta_name = 'Temp-URL-key' + (("-%d" % (i + 1) if i else ""))
meta[meta_name] = key
container_cache_key = 'container/' + account + '/c'
ic.setdefault(container_cache_key, {'meta': meta})
def test_passthrough(self):
resp = self._make_request('/v1/a/c/o').get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertNotIn('Temp URL invalid', resp.body)
def test_allow_options(self):
self.app.status_headers_body_iter = iter([('200 Ok', {}, '')])
resp = self._make_request(
'/v1/a/c/o?temp_url_sig=abcde&temp_url_expires=12345',
environ={'REQUEST_METHOD': 'OPTIONS'}).get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
def assert_valid_sig(self, expires, path, keys, sig, environ=None,
prefix=None):
if not environ:
environ = {}
environ['QUERY_STRING'] = 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)
if prefix is not None:
environ['QUERY_STRING'] += '&temp_url_prefix=%s' % prefix
req = self._make_request(path, keys=keys, environ=environ)
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="o"; ' + "filename*=UTF-8''o")
self.assertEqual(resp.headers['expires'],
strftime('%a, %d %b %Y %H:%M:%S GMT',
gmtime(expires)))
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
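        # The signature is the hex SHA1-HMAC of the three-line string
        # "METHOD\n<expires>\n<path>", keyed with the account (or container)
        # temp-url key, as computed below.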
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
self.assert_valid_sig(expires, path, [key], sig)
def test_get_valid_key2(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key1 = 'abc123'
key2 = 'def456'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig1 = hmac.new(key1, hmac_body, sha1).hexdigest()
sig2 = hmac.new(key2, hmac_body, sha1).hexdigest()
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, [key1, key2], sig)
def test_get_valid_container_keys(self):
ic = {}
environ = {'swift.infocache': ic}
# Add two static container keys
container_keys = ['me', 'other']
meta = {}
for idx, key in enumerate(container_keys):
meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else ""))
if key:
meta[meta_name] = key
ic['container/a/c'] = {'meta': meta}
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key1 = 'me'
key2 = 'other'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig1 = hmac.new(key1, hmac_body, sha1).hexdigest()
sig2 = hmac.new(key2, hmac_body, sha1).hexdigest()
account_keys = []
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, account_keys, sig, environ)
@mock.patch('swift.common.middleware.tempurl.time', return_value=0)
def test_get_valid_with_filename(self, mock_time):
method = 'GET'
expires = (((24 + 1) * 60 + 1) * 60) + 1
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob%%20%%22killer%%22.txt' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="bob %22killer%22.txt"; ' +
"filename*=UTF-8''bob%20%22killer%22.txt")
self.assertIn('expires', resp.headers)
self.assertEqual('Fri, 02 Jan 1970 01:01:01 GMT',
resp.headers['expires'])
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_valid_with_filename(self):
method = 'HEAD'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob_killer.txt' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="bob_killer.txt"; ' +
"filename*=UTF-8''bob_killer.txt")
def test_head_and_get_headers_match(self):
method = 'HEAD'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
% (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
get_method = 'GET'
get_hmac_body = '%s\n%s\n%s' % (get_method, expires, path)
get_sig = hmac.new(key, get_hmac_body, sha1).hexdigest()
get_req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
% (get_sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
get_resp = get_req.get_response(self.tempurl)
self.assertEqual(resp.headers, get_resp.headers)
@mock.patch('swift.common.middleware.tempurl.time', return_value=0)
def test_get_valid_with_filename_and_inline(self, mock_time):
method = 'GET'
expires = 1
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob%%20%%22killer%%22.txt&inline=' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'inline; filename="bob %22killer%22.txt"; ' +
"filename*=UTF-8''bob%20%22killer%22.txt")
self.assertIn('expires', resp.headers)
self.assertEqual('Thu, 01 Jan 1970 00:00:01 GMT',
resp.headers['expires'])
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid_with_inline(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'inline=' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'], 'inline')
self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid_with_prefix(self):
method = 'GET'
expires = int(time() + 86400)
prefix = 'p1/p2/'
sig_path = 'prefix:/v1/a/c/' + prefix
query_path = '/v1/a/c/' + prefix + 'o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, sig_path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix=prefix)
query_path = query_path[:-1] + 'p3/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, sig_path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix=prefix)
def test_get_valid_with_prefix_empty(self):
method = 'GET'
expires = int(time() + 86400)
sig_path = 'prefix:/v1/a/c/'
query_path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, sig_path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix='')
def test_obj_odd_chars(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/a\r\nb'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="a%0D%0Ab"; ' +
"filename*=UTF-8''a%0D%0Ab")
self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_obj_odd_chars_in_content_disposition_metadata(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
headers = [('Content-Disposition', 'attachment; filename="fu\nbar"')]
self.tempurl.app = FakeApp(iter([('200 Ok', headers, '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="fu%0Abar"')
self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_obj_trailing_slash(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o/'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="o"; ' +
"filename*=UTF-8''o")
self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_filename_trailing_slash(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=/i/want/this/just/as/it/is/' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(
resp.headers['content-disposition'],
'attachment; filename="/i/want/this/just/as/it/is/"; ' +
"filename*=UTF-8''/i/want/this/just/as/it/is/")
self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_valid_but_404(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('content-disposition', resp.headers)
self.assertNotIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_put_not_allowed_by_get(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_put_valid(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_get_not_allowed_by_put(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_missing_sig(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_expires=%s' % expires})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_missing_expires(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s' % sig})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_bad_path(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_no_key(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_head_allowed_by_get(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_allowed_by_put(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_allowed_by_post(self):
method = 'POST'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_otherwise_not_allowed(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
# Deliberately fudge expires to show HEADs aren't just automatically
# allowed.
expires += 1
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Www-Authenticate', resp.headers)
def test_post_when_forbidden_by_config(self):
self.tempurl.conf['methods'].remove('POST')
method = 'POST'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'POST',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_delete_when_forbidden_by_config(self):
self.tempurl.conf['methods'].remove('DELETE')
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_delete_allowed(self):
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
def test_unknown_not_allowed(self):
method = 'UNKNOWN'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'UNKNOWN',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_authorize_limits_scope(self):
req_other_object = Request.blank("/v1/a/c/o2")
req_other_container = Request.blank("/v1/a/c2/o2")
req_other_account = Request.blank("/v1/a2/c2/o2")
key_kwargs = {
'keys': ['account-key', 'shared-key'],
'container_keys': ['container-key', 'shared-key'],
}
# A request with the account key limits the pre-authed scope to the
# account level.
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('account-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
        # _make_request() will set up the environ cache for us
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
# Requests for other objects happen if, for example, you're
# downloading a large object or creating a large-object manifest.
oo_resp = authorize(req_other_object)
self.assertIsNone(oo_resp)
oc_resp = authorize(req_other_container)
self.assertIsNone(oc_resp)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
# A request with the container key limits the pre-authed scope to
# the container level; a different container in the same account is
# out of scope and thus forbidden.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('container-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
oo_resp = authorize(req_other_object)
self.assertIsNone(oo_resp)
oc_resp = authorize(req_other_container)
self.assertEqual(oc_resp.status_int, 401)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
# If account and container share a key (users set these, so this can
# happen by accident, stupidity, *or* malice!), limit the scope to
# account level. This prevents someone from shrinking the scope of
# account-level tempurls by reusing one of the account's keys on a
# container.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('shared-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
oo_resp = authorize(req_other_object)
self.assertIsNone(oo_resp)
oc_resp = authorize(req_other_container)
self.assertIsNone(oc_resp)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
def test_changed_path_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path + '2', keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_changed_sig_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
if sig[-1] != '0':
sig = sig[:-1] + '0'
else:
sig = sig[:-1] + '1'
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_changed_expires_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires + 1)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_different_key_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key + '2'],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_no_prefix_match_invalid(self):
method = 'GET'
expires = int(time() + 86400)
sig_path = 'prefix:/v1/a/c/p1/p2/'
query_path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, sig_path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
query_path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s&temp_url_prefix=%s' %
(sig, expires, 'p1/p2/')})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_object_url_with_prefix_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s&temp_url_prefix=o' %
(sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_disallowed_header_object_manifest(self):
self.tempurl = tempurl.filter_factory({})(self.auth)
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
for method in ('PUT', 'POST'):
for hdr, value in [('X-Object-Manifest', 'private/secret'),
('X-Symlink-Target', 'cont/symlink')]:
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, method=method, keys=[key],
headers={hdr: value},
environ={'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s'
% (sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 400)
self.assertTrue('header' in resp.body)
self.assertTrue('not allowed' in resp.body)
self.assertTrue(hdr in resp.body)
def test_removed_incoming_header(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('x-remove-this', self.app.request.headers)
def test_removed_incoming_headers_match(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this-*',
'incoming_allow_headers': 'x-remove-this-except-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this-one': 'value1',
'x-remove-this-except-this': 'value2'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('x-remove-this-one', self.app.request.headers)
self.assertEqual(
self.app.request.headers['x-remove-this-except-this'], 'value2')
def test_allow_trumps_incoming_header_conflict(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-conflict-header',
'incoming_allow_headers': 'x-conflict-header'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertTrue('x-conflict-header' in self.app.request.headers)
def test_allow_trumps_incoming_header_startswith_conflict(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-conflict-header-*',
'incoming_allow_headers': 'x-conflict-header-*'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header-test': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertTrue('x-conflict-header-test' in self.app.request.headers)
def test_removed_outgoing_header(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-one-a'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertNotIn('x-test-header-one-a', resp.headers)
self.assertEqual(resp.headers['x-test-header-two-a'], 'value2')
def test_removed_outgoing_headers_match(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-two-*',
'outgoing_allow_headers': 'x-test-header-two-b'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['x-test-header-one-a'], 'value1')
self.assertNotIn('x-test-header-two-a', resp.headers)
self.assertEqual(resp.headers['x-test-header-two-b'], 'value3')
def test_allow_trumps_outgoing_header_conflict(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-conflict-header',
'outgoing_allow_headers': 'x-conflict-header'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', {
'X-Conflict-Header': 'value'}, '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertTrue('x-conflict-header' in resp.headers)
self.assertEqual(resp.headers['x-conflict-header'], 'value')
def test_allow_trumps_outgoing_header_startswith_conflict(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-conflict-header-*',
'outgoing_allow_headers': 'x-conflict-header-*'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', {
'X-Conflict-Header-Test': 'value'}, '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertTrue('x-conflict-header-test' in resp.headers)
self.assertEqual(resp.headers['x-conflict-header-test'], 'value')
def test_get_path_parts(self):
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}),
('a', 'c', 'o'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}),
('a', 'c', 'o'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}),
('a', 'c', 'o'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}),
('a', 'c', 'o'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}),
('a', 'c', 'o'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}),
('a', 'c', '//o///'))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}),
(None, None, None))
self.assertEqual(self.tempurl._get_path_parts({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}),
(None, None, None))
def test_get_temp_url_info(self):
s = 'f5d5051bddf5df7e27c628818738334f'
e_ts = int(time() + 86400)
e_8601 = strftime(tempurl.EXPIRES_ISO8601_FORMAT, gmtime(e_ts))
for e in (e_ts, e_8601):
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
s, e)}),
(s, e_ts, None, None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING':
'temp_url_sig=%s&temp_url_expires=%s&temp_url_prefix=%s'
% (s, e, 'prefix')}),
(s, e_ts, 'prefix', None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bobisyouruncle' % (s, e)}),
(s, e_ts, None, 'bobisyouruncle', None))
self.assertEqual(
self.tempurl._get_temp_url_info({}),
(None, None, None, None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_expires=%s' % e}),
(None, e_ts, None, None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s' % s}),
(s, None, None, None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=bad' % (
s)}),
(s, 0, None, None, None))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'inline=' % (s, e)}),
(s, e_ts, None, None, True))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bobisyouruncle&inline=' % (s, e)}),
(s, e_ts, None, 'bobisyouruncle', True))
e_ts = int(time() - 1)
e_8601 = strftime(tempurl.EXPIRES_ISO8601_FORMAT, gmtime(e_ts))
for e in (e_ts, e_8601):
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
s, e)}),
(s, 0, None, None, None))
# Offsets not supported (yet?).
e_8601 = strftime('%Y-%m-%dT%H:%M:%S+0000', gmtime(e_ts))
self.assertEqual(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
s, e_8601)}),
(s, 0, None, None, None))
def test_get_hmacs(self):
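        # The expected digests are the SHA1-HMAC of "GET\n1\n/v1/a/c/o" keyed
        # with 'abc'; the second call checks that an explicit request_method
        # argument overrides the environ's REQUEST_METHOD when signing.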
self.assertEqual(
self.tempurl._get_hmacs(
{'REQUEST_METHOD': 'GET'}, 1, '/v1/a/c/o',
[('abc', 'account')]),
[('026d7f7cc25256450423c7ad03fc9f5ffc1dab6d', 'account')])
self.assertEqual(
self.tempurl._get_hmacs(
{'REQUEST_METHOD': 'HEAD'}, 1, '/v1/a/c/o',
[('abc', 'account')], request_method='GET'),
[('026d7f7cc25256450423c7ad03fc9f5ffc1dab6d', 'account')])
def test_invalid(self):
def _start_response(status, headers, exc_info=None):
            self.assertEqual(status, '401 Unauthorized')
self.assertTrue('Temp URL invalid' in ''.join(
self.tempurl._invalid({'REQUEST_METHOD': 'GET'},
_start_response)))
self.assertEqual('', ''.join(
self.tempurl._invalid({'REQUEST_METHOD': 'HEAD'},
_start_response)))
def test_auth_scheme_value(self):
# Passthrough
environ = {}
resp = self._make_request('/v1/a/c/o', environ=environ).get_response(
self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertNotIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
self.assertNotIn('swift.auth_scheme', environ)
# Rejected by TempURL
environ = {'REQUEST_METHOD': 'PUT',
'QUERY_STRING':
'temp_url_sig=dummy&temp_url_expires=1234'}
req = self._make_request('/v1/a/c/o', keys=['abc'],
environ=environ)
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 401)
self.assertIn('Temp URL invalid', resp.body)
self.assertIn('Www-Authenticate', resp.headers)
def test_clean_incoming_headers(self):
irh = []
iah = []
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertIn('HTTP_TEST_HEADER', env)
irh = ['test-header']
iah = []
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertNotIn('HTTP_TEST_HEADER', env)
irh = ['test-header-*']
iah = []
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertNotIn('HTTP_TEST_HEADER_ONE', env)
self.assertNotIn('HTTP_TEST_HEADER_TWO', env)
irh = ['test-header-*']
iah = ['test-header-two']
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertNotIn('HTTP_TEST_HEADER_ONE', env)
self.assertIn('HTTP_TEST_HEADER_TWO', env)
irh = ['test-header-*', 'test-other-header']
iah = ['test-header-two', 'test-header-yes-*']
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value',
'HTTP_TEST_OTHER_HEADER': 'value',
'HTTP_TEST_HEADER_YES': 'value',
'HTTP_TEST_HEADER_YES_THIS': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertNotIn('HTTP_TEST_HEADER_ONE', env)
self.assertIn('HTTP_TEST_HEADER_TWO', env)
self.assertNotIn('HTTP_TEST_OTHER_HEADER', env)
self.assertNotIn('HTTP_TEST_HEADER_YES', env)
self.assertIn('HTTP_TEST_HEADER_YES_THIS', env)
def test_clean_outgoing_headers(self):
orh = []
oah = []
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertIn('test-header', hdrs)
orh = ['test-header']
oah = []
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertNotIn('test-header', hdrs)
orh = ['test-header-*']
oah = []
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertNotIn('test-header-one', hdrs)
self.assertNotIn('test-header-two', hdrs)
orh = ['test-header-*']
oah = ['test-header-two']
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertNotIn('test-header-one', hdrs)
self.assertIn('test-header-two', hdrs)
orh = ['test-header-*', 'test-other-header']
oah = ['test-header-two', 'test-header-yes-*']
hdrs = {'test-header-one': 'value',
'test-header-two': 'value',
'test-other-header': 'value',
'test-header-yes': 'value',
'test-header-yes-this': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertNotIn('test-header-one', hdrs)
self.assertIn('test-header-two', hdrs)
self.assertNotIn('test-other-header', hdrs)
self.assertNotIn('test-header-yes', hdrs)
self.assertIn('test-header-yes-this', hdrs)
def test_unicode_metadata_value(self):
meta = {"temp-url-key": "test", "temp-url-key-2": u"test2"}
results = tempurl.get_tempurl_keys_from_metadata(meta)
for str_value in results:
self.assertIsInstance(str_value, str)
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
tempurl.filter_factory({})
swift_info = utils.get_swift_info()
self.assertIn('tempurl', swift_info)
info = swift_info['tempurl']
self.assertEqual(set(info['methods']),
set(('GET', 'HEAD', 'PUT', 'POST', 'DELETE')))
self.assertEqual(set(info['incoming_remove_headers']),
set(('x-timestamp',)))
self.assertEqual(set(info['incoming_allow_headers']), set())
self.assertEqual(set(info['outgoing_remove_headers']),
set(('x-object-meta-*',)))
self.assertEqual(set(info['outgoing_allow_headers']),
set(('x-object-meta-public-*',)))
def test_non_default_methods(self):
tempurl.filter_factory({
'methods': 'GET HEAD PUT DELETE BREW',
'incoming_remove_headers': '',
'incoming_allow_headers': 'x-timestamp x-versions-location',
'outgoing_remove_headers': 'x-*',
'outgoing_allow_headers': 'x-object-meta-* content-type',
})
swift_info = utils.get_swift_info()
self.assertIn('tempurl', swift_info)
info = swift_info['tempurl']
self.assertEqual(set(info['methods']),
set(('GET', 'HEAD', 'PUT', 'DELETE', 'BREW')))
self.assertEqual(set(info['incoming_remove_headers']), set())
self.assertEqual(set(info['incoming_allow_headers']),
set(('x-timestamp', 'x-versions-location')))
self.assertEqual(set(info['outgoing_remove_headers']), set(('x-*', )))
self.assertEqual(set(info['outgoing_allow_headers']),
set(('x-object-meta-*', 'content-type')))
if __name__ == '__main__':
unittest.main()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from signal import SIGINT
from proton import Message
from system_test import TestCase
from system_test import Qdrouterd
from system_test import main_module
from system_test import TIMEOUT
from system_test import Process
from system_test import AsyncTestSender
from system_test import unittest
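# The tests below stand up a full mesh of three interior routers (A-B, B-C,
# C-A) whose default address distribution is "unavailable", then exercise
# receiver fail-over, unavailable-address handling and per-priority delivery
# using the external test-sender / test-receiver helper programs.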
class ThreeRouterTest(TestCase):
@classmethod
def setUpClass(cls):
"""
Create a mesh of three routers. Reject any links or messages sent to
an unavailable address.
"""
super(ThreeRouterTest, cls).setUpClass()
def router(name, extra_config):
config = [
('router', {'id': name,
'mode': 'interior',
"defaultDistribution": "unavailable"}),
('listener', {'role': 'normal',
'port': cls.tester.get_port(),
"linkCapacity": '100'}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'balanced', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
] + extra_config
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))
cls.routers = []
inter_router_A = cls.tester.get_port()
inter_router_B = cls.tester.get_port()
inter_router_C = cls.tester.get_port()
router('RouterA',
[('listener', {'role': 'inter-router', 'port': inter_router_A}),
('connector', {'role': 'inter-router', 'port': inter_router_B})])
router('RouterB',
[('listener', {'role': 'inter-router', 'port': inter_router_B}),
('connector', {'role': 'inter-router', 'port': inter_router_C})])
router('RouterC',
[('listener', {'role': 'inter-router', 'port': inter_router_C}),
('connector', {'role': 'inter-router', 'port': inter_router_A})])
cls.RouterA = cls.routers[0]
cls.RouterB = cls.routers[1]
cls.RouterC = cls.routers[2]
cls.RouterA.wait_router_connected('RouterB')
cls.RouterA.wait_router_connected('RouterC')
cls.RouterB.wait_router_connected('RouterA')
cls.RouterB.wait_router_connected('RouterC')
cls.RouterC.wait_router_connected('RouterA')
cls.RouterC.wait_router_connected('RouterB')
def server_address(self, router):
return router.addresses[0]
def server_port(self, router):
return router.ports[0] # first listener is for client connection
def server_host(self, router):
fam = router.ports_family
return router.get_host(fam.get(self.server_port(router),
"IPv4"))
def spawn_receiver(self, router, count, address, *extra_args):
cmd = ["test-receiver",
"-a", "%s:%s" % (self.server_host(router),
self.server_port(router)),
"-c", str(count), "-s", address] + list(extra_args)
# env = dict(os.environ, PN_TRACE_FRM="1")
# return self.popen(cmd, expect=Process.EXIT_OK, env=env)
return self.popen(cmd, expect=Process.EXIT_OK)
def spawn_sender(self, router, count, address, *extra_args):
cmd = ["test-sender",
"-a", "%s:%s" % (self.server_host(router),
self.server_port(router)),
"-c", str(count), "-t", address] + list(extra_args)
# env = dict(os.environ, PN_TRACE_FRM="1")
# return self.popen(cmd, expect=Process.EXIT_OK, env=env)
return self.popen(cmd, expect=Process.EXIT_OK)
def _rx_failover(self, extra_tx_args=None, extra_rx_args=None):
# Have a single sender transmit unsettled as fast as possible
# non-stop. Have a single receiver that consumes a small number of
# messages before failing over to a different router in the mesh
extra_tx = extra_tx_args or []
extra_rx = extra_rx_args or []
total = 100
router_index = 0
tx = self.spawn_sender(self.RouterC, 0, "balanced/foo", *extra_tx)
while total > 0:
rx = self.spawn_receiver(self.routers[router_index], 5,
"balanced/foo", *extra_rx)
if rx.wait(timeout=TIMEOUT):
raise Exception("Receiver failed to consume all messages")
total -= 5
router_index += 1
if router_index == len(self.routers):
router_index = 0
tx.send_signal(SIGINT)
out_text, out_err = tx.communicate(timeout=TIMEOUT)
if tx.returncode:
raise Exception("Sender failed: %s %s"
% (out_text, out_err))
def test_01_rx_failover_clean(self):
"""
Runs the receiver failover test. In this test the receiver cleanly
shuts down the AMQP endpoint before failing over.
"""
self._rx_failover()
def test_02_rx_failover_dirty(self):
"""
Runs the receiver failover test. In this test the receiver abruptly
drops the TCP connection simulating a client crash.
"""
tcp_drop = ["-E"]
self._rx_failover(extra_rx_args=tcp_drop)
def test_03_unavailable_link_attach(self):
"""
Attempt to attach a link to an unavailable address, expect the router
to detach it
"""
ats = AsyncTestSender(self.server_address(self.RouterA),
"an/unavailable/address")
try:
ats.wait()
            self.fail("expected the router to detach the link")
        except AsyncTestSender.TestSenderException:
self.assertIn("link error", ats.error)
def test_04_unavailable_anonymous_link_attach(self):
"""
Attempt to attach an anonymous link and send a message to an
unavailable address. Expect to allow the link, but REJECT the message
"""
message = Message(body="REJECTED!!!")
message.address = "another/unavailable/address"
ats = AsyncTestSender(self.server_address(self.RouterA),
target=None,
message=message)
ats.wait()
self.assertEqual(0, ats.accepted)
self.assertEqual(1, ats.rejected)
def test_05_unavailable_anonymous_link_send(self):
"""
Attach an anonymous link and send to a configured address (no
subscribers). Expect to allow the link, but RELEASE the message
"""
message = Message(body="Release me, let me go...")
message.address = "closest/foo"
ats = AsyncTestSender(self.server_address(self.RouterA),
target=None,
message=message)
ats.wait()
self.assertEqual(0, ats.accepted)
self.assertEqual(1, ats.released)
self.assertEqual(0, ats.rejected)
def test_06_parallel_priority(self):
"""
Create 10 senders each with a different priority.
Ensure all messages arrive as expected.
"""
priorities = 10
send_batch = 25
total = priorities * send_batch
rx = self.spawn_receiver(self.RouterC,
total,
"closest/test_06_address",
"-d")
self.RouterA.wait_address("closest/test_06_address")
senders = [self.spawn_sender(self.RouterA,
send_batch,
"closest/test_06_address",
"-sm", "-p%s" % p, "-d")
for p in range(priorities)]
# wait for all senders to finish first, then check the receiver
for tx in senders:
out_text, out_err = tx.communicate(timeout=TIMEOUT)
if tx.returncode:
raise Exception("Sender failed: %s %s" % (out_text, out_err))
if rx.wait(timeout=TIMEOUT):
raise Exception("Receiver failed to consume all messages")
if __name__ == '__main__':
unittest.main(main_module())
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2018_11_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
|
|
# Authors: Christoph Dinh <[email protected]>
# Martin Luessi <[email protected]>
# Matti Hamalainen <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import time
import copy
import numpy as np
from .. import pick_channels
from ..utils import logger, verbose
from ..epochs import _BaseEpochs
from ..event import _find_events
from ..io.proj import setup_proj
class RtEpochs(_BaseEpochs):
"""Realtime Epochs
Can receive epochs in real time from an RtClient.
For example, to get some epochs from a running mne_rt_server on
'localhost', you could use:
client = mne.realtime.RtClient('localhost')
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.realtime.RtEpochs(client, event_id, tmin, tmax)
epochs.start() # start the measurement and start receiving epochs
evoked_1 = epochs.average() # computed over all epochs
evoked_2 = epochs[-5:].average() # computed over the last 5 epochs
Parameters
----------
client : instance of mne.realtime.RtClient
The realtime client.
event_id : int | list of int
The id of the event to consider. If int, only events with the
ID specified by event_id are considered. Multiple event ID's
can be specified using a list.
tmin : float
Start time before event.
tmax : float
End time after event.
stim_channel : string or list of string
Name of the stim channel or all the stim channels affected by
the trigger.
sleep_time : float
Time in seconds to wait between checking for new epochs when epochs
are requested and the receive queue is empty.
name : string
Comment that describes the Evoked data created.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
reject : dict
Epoch rejection parameters based on peak to peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done.
Values are float. Example:
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6 # uV (EOG channels)
)
flat : dict
Epoch rejection parameters based on flatness of signal
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
If flat is None then no rejection is done.
proj : bool, optional
Apply SSP projection vectors
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample, data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
isi_max : float
        The maximum time in seconds between epochs. If no epoch
arrives in the next isi_max seconds the RtEpochs stops.
find_events : dict
The arguments to the real-time `find_events` method as a dictionary.
If `find_events` is None, then default values are used.
Valid keys are 'output' | 'consecutive' | 'min_duration' | 'mask'.
Example (also default values):
find_events = dict(output='onset', consecutive='increasing',
min_duration=0, mask=0)
See mne.find_events for detailed explanation of these options.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to client.verbose.
Attributes
----------
info : dict
Measurement info.
event_id : dict
        Names of conditions corresponding to event_ids.
ch_names : list of string
List of channels' names.
events : array, shape (n_events, 3)
The events associated with the epochs currently in the queue.
verbose : bool, str, int, or None
See above.
"""
@verbose
def __init__(self, client, event_id, tmin, tmax, stim_channel='STI 014',
sleep_time=0.1, baseline=(None, 0), picks=None,
name='Unknown', reject=None, flat=None, proj=True,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, isi_max=2., find_events=None, verbose=None):
info = client.get_measurement_info()
# the measurement info of the data as we receive it
self._client_info = copy.deepcopy(info)
verbose = client.verbose if verbose is None else verbose
# call _BaseEpochs constructor
super(RtEpochs, self).__init__(info, event_id, tmin, tmax,
baseline=baseline, picks=picks, name=name, reject=reject,
flat=flat, decim=decim, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, detrend=detrend,
add_eeg_ref=add_eeg_ref, verbose=verbose)
self.proj = proj
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=self.proj)
self._client = client
if not isinstance(stim_channel, list):
stim_channel = [stim_channel]
stim_picks = pick_channels(self._client_info['ch_names'],
include=stim_channel, exclude=[])
if len(stim_picks) == 0:
raise ValueError('No stim channel found to extract event '
'triggers.')
self._stim_picks = stim_picks
# find_events default options
self._find_events_kwargs = dict(output='onset',
consecutive='increasing',
min_duration=0, mask=0)
# update default options if dictionary is provided
if find_events is not None:
self._find_events_kwargs.update(find_events)
min_samples = (self._find_events_kwargs['min_duration']
* self.info['sfreq'])
self._find_events_kwargs.pop('min_duration', None)
self._find_events_kwargs['min_samples'] = min_samples
self._sleep_time = sleep_time
# add calibration factors
cals = np.zeros(self._client_info['nchan'])
for k in range(self._client_info['nchan']):
cals[k] = (self._client_info['chs'][k]['range']
* self._client_info['chs'][k]['cal'])
self._cals = cals[:, None]
# FIFO queues for received epochs and events
self._epoch_queue = list()
self._events = list()
# variables needed for receiving raw buffers
self._last_buffer = None
self._first_samp = 0
self._event_backlog = list()
# Number of good and bad epochs received
self._n_good = 0
self._n_bad = 0
self._started = False
self._last_time = time.time()
self.isi_max = isi_max
@property
def events(self):
"""The events associated with the epochs currently in the queue."""
return np.array(self._events)
def start(self):
"""Start receiving epochs
The measurement will be started if it has not already been started.
"""
if not self._started:
# register the callback
self._client.register_receive_callback(self._process_raw_buffer)
# start the measurement and the receive thread
nchan = self._client_info['nchan']
self._client.start_receive_thread(nchan)
self._started = True
            self._last_time = np.inf  # init delay counter; the isi_max check in next() stops iteration once exceeded
def stop(self, stop_receive_thread=False, stop_measurement=False):
"""Stop receiving epochs
Parameters
----------
stop_receive_thread : bool
Stop the receive thread. Note: Other RtEpochs instances will also
stop receiving epochs when the receive thread is stopped. The
receive thread will always be stopped if stop_measurement is True.
stop_measurement : bool
Also stop the measurement. Note: Other clients attached to the
server will also stop receiving data.
"""
if self._started:
self._client.unregister_receive_callback(self._process_raw_buffer)
self._started = False
if stop_receive_thread or stop_measurement:
self._client.stop_receive_thread(stop_measurement=stop_measurement)
def next(self, return_event_id=False):
"""To make iteration over epochs easy.
"""
first = True
while True:
current_time = time.time()
if current_time > (self._last_time + self.isi_max):
logger.info('Time of %s seconds exceeded.' % self.isi_max)
raise StopIteration
if len(self._epoch_queue) > self._current:
epoch = self._epoch_queue[self._current]
event_id = self._events[self._current][-1]
self._current += 1
self._last_time = current_time
if return_event_id:
return epoch, event_id
else:
return epoch
if self._started:
if first:
logger.info('Waiting for epoch %d' % (self._current + 1))
first = False
time.sleep(self._sleep_time)
else:
raise RuntimeError('Not enough epochs in queue and currently '
'not receiving epochs, cannot get epochs!')
def _get_data_from_disk(self):
"""Return the data for n_epochs epochs"""
epochs = list()
for epoch in self:
epochs.append(epoch)
data = np.array(epochs)
return data
def _process_raw_buffer(self, raw_buffer):
"""Process raw buffer (callback from RtClient)
Note: Do not print log messages during regular use. It will be printed
asynchronously which is annoying when working in an interactive shell.
Parameters
----------
raw_buffer : array of float, shape=(nchan, n_times)
The raw buffer.
"""
verbose = 'ERROR'
sfreq = self.info['sfreq']
n_samp = len(self._raw_times)
# relative start and stop positions in samples
tmin_samp = int(round(sfreq * self.tmin))
tmax_samp = tmin_samp + n_samp
last_samp = self._first_samp + raw_buffer.shape[1] - 1
# apply calibration without inplace modification
raw_buffer = self._cals * raw_buffer
# detect events
        data = np.abs(raw_buffer[self._stim_picks]).astype(int)
data = np.atleast_2d(data)
buff_events = _find_events(data, self._first_samp, verbose=verbose,
**self._find_events_kwargs)
events = self._event_backlog
for event_id in self.event_id.values():
idx = np.where(buff_events[:, -1] == event_id)[0]
events.extend(zip(list(buff_events[idx, 0]),
list(buff_events[idx, -1])))
events.sort()
event_backlog = list()
for event_samp, event_id in events:
epoch = None
if (event_samp + tmin_samp >= self._first_samp
and event_samp + tmax_samp <= last_samp):
# easy case: whole epoch is in this buffer
start = event_samp + tmin_samp - self._first_samp
stop = event_samp + tmax_samp - self._first_samp
epoch = raw_buffer[:, start:stop]
elif (event_samp + tmin_samp < self._first_samp
and event_samp + tmax_samp <= last_samp):
# have to use some samples from previous buffer
if self._last_buffer is None:
continue
n_last = self._first_samp - (event_samp + tmin_samp)
n_this = n_samp - n_last
epoch = np.c_[self._last_buffer[:, -n_last:],
raw_buffer[:, :n_this]]
elif event_samp + tmax_samp > last_samp:
# we need samples from the future
# we will process this epoch with the next buffer
event_backlog.append((event_samp, event_id))
else:
raise RuntimeError('Unhandled case..')
if epoch is not None:
self._append_epoch_to_queue(epoch, event_samp, event_id)
# set things up for processing of next buffer
self._event_backlog = event_backlog
n_buffer = raw_buffer.shape[1]
if self._last_buffer is None:
self._last_buffer = raw_buffer
self._first_samp = last_samp + 1
elif self._last_buffer.shape[1] <= n_samp + n_buffer:
self._last_buffer = np.c_[self._last_buffer, raw_buffer]
else:
# do not increase size of _last_buffer any further
self._first_samp = self._first_samp + n_buffer
self._last_buffer[:, :-n_buffer] = self._last_buffer[:, n_buffer:]
self._last_buffer[:, -n_buffer:] = raw_buffer
def _append_epoch_to_queue(self, epoch, event_samp, event_id):
"""Append a (raw) epoch to queue
Note: Do not print log messages during regular use. It will be printed
        asynchronously which is annoying when working in an interactive shell.
Parameters
----------
epoch : array of float, shape=(nchan, n_times)
The raw epoch (only calibration has been applied) over all
channels.
event_samp : int
The time in samples when the epoch occurred.
event_id : int
The event ID of the epoch.
"""
# select the channels
epoch = epoch[self.picks, :]
# handle offset
if self._offset is not None:
epoch += self._offset
# apply SSP
if self.proj and self._projector is not None:
epoch = np.dot(self._projector, epoch)
# Detrend, baseline correct, decimate
epoch = self._preprocess(epoch, verbose='ERROR')
# Decide if this is a good epoch
is_good, _ = self._is_good_epoch(epoch, verbose='ERROR')
if is_good:
self._epoch_queue.append(epoch)
self._events.append((event_samp, 0, event_id))
self._n_good += 1
else:
self._n_bad += 1
def __repr__(self):
        s = 'good / bad epochs received: %d / %d, epochs in queue: %d'\
            % (self._n_good, self._n_bad, len(self._epoch_queue))
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
return '<RtEpochs | %s>' % s
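# Illustrative, self-contained sketch (not part of the class) of the
# cross-buffer stitching arithmetic used in _process_raw_buffer: when an
# epoch starts before the current buffer, the missing leading samples are
# borrowed from the tail of the previous buffer.
def _stitch_epoch_sketch(last_buffer, raw_buffer, first_samp, event_samp,
                         tmin_samp, n_samp):
    """Return an epoch of n_samp samples that may span two buffers."""
    start = event_samp + tmin_samp
    if start >= first_samp:
        # whole epoch lies within the current buffer
        offset = start - first_samp
        return raw_buffer[:, offset:offset + n_samp]
    # otherwise take the leading samples from the previous buffer
    n_last = first_samp - start
    n_this = n_samp - n_last
    return np.c_[last_buffer[:, -n_last:], raw_buffer[:, :n_this]]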
|
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from neutron.agent import rpc as agent_rpc
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib
from neutron.tests import base
class TestCreateESwitchManager(base.BaseTestCase):
SCANNED_DEVICES = [('0000:06:00.1', 0),
('0000:06:00.2', 1),
('0000:06:00.3', 2)]
@staticmethod
def cleanup():
if hasattr(esm.ESwitchManager, '_instance'):
del esm.ESwitchManager._instance
def test_create_eswitch_mgr_fail(self):
device_mappings = {'physnet1': ['p6p1']}
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
side_effect=exc.InvalidDeviceError(
dev_name="p6p1", reason="device" " not found")),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True):
eswitch_mgr = esm.ESwitchManager()
self.addCleanup(self.cleanup)
self.assertRaises(exc.InvalidDeviceError,
eswitch_mgr.discover_devices,
device_mappings, None)
def test_create_eswitch_mgr_ok(self):
device_mappings = {'physnet1': ['p6p1']}
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
return_value=self.SCANNED_DEVICES),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True):
eswitch_mgr = esm.ESwitchManager()
self.addCleanup(self.cleanup)
eswitch_mgr.discover_devices(device_mappings, None)
class TestESwitchManagerApi(base.BaseTestCase):
SCANNED_DEVICES = [('0000:06:00.1', 0),
('0000:06:00.2', 1),
('0000:06:00.3', 2)]
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
WRONG_MAC = '00:00:00:00:00:67'
WRONG_PCI = "0000:06:00.6"
MAX_RATE = esm.IP_LINK_CAPABILITY_RATE
MIN_RATE = esm.IP_LINK_CAPABILITY_MIN_TX_RATE
def setUp(self):
super(TestESwitchManagerApi, self).setUp()
device_mappings = {'physnet1': ['p6p1']}
self.eswitch_mgr = esm.ESwitchManager()
self.addCleanup(self.cleanup)
self._set_eswitch_manager(self.eswitch_mgr, device_mappings)
@staticmethod
def cleanup():
if hasattr(esm.ESwitchManager, '_instance'):
del esm.ESwitchManager._instance
def _set_eswitch_manager(self, eswitch_mgr, device_mappings):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
return_value=self.SCANNED_DEVICES), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True):
eswitch_mgr.discover_devices(device_mappings, None)
def test_discover_devices_with_device(self):
device_mappings = {'physnet1': ['p6p1', 'p6p2']}
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.ESwitchManager._create_emb_switch",
) as emb_switch:
self.eswitch_mgr.discover_devices(device_mappings, None)
self.assertTrue(emb_switch.called)
def test_discover_devices_without_device(self):
device_mappings = {'physnet1': ['p6p1', 'p6p2']}
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=False), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.ESwitchManager._create_emb_switch",
) as emb_switch:
self.eswitch_mgr.discover_devices(device_mappings, None)
self.assertFalse(emb_switch.called)
def test_get_assigned_devices_info(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_assigned_devices_info",
return_value=[(self.ASSIGNED_MAC, self.PCI_SLOT)]):
result = self.eswitch_mgr.get_assigned_devices_info()
self.assertIn(self.ASSIGNED_MAC, list(result)[0])
self.assertIn(self.PCI_SLOT, list(result)[0])
def test_get_assigned_devices_info_multiple_nics_for_physnet(self):
device_mappings = {'physnet1': ['p6p1', 'p6p2']}
devices_info = {
'p6p1': [(self.ASSIGNED_MAC, self.PCI_SLOT)],
'p6p2': [(self.WRONG_MAC, self.WRONG_PCI)],
}
def get_assigned_devices_info(self):
return devices_info[self.dev_name]
self._set_eswitch_manager(self.eswitch_mgr, device_mappings)
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_assigned_devices_info",
side_effect=get_assigned_devices_info,
autospec=True):
result = self.eswitch_mgr.get_assigned_devices_info()
self.assertIn(devices_info['p6p1'][0], list(result))
self.assertIn(devices_info['p6p2'][0], list(result))
def test_get_device_status_enable(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_device_state",
return_value='enable'):
result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
self.PCI_SLOT)
self.assertEqual('enable', result)
def test_get_device_status_disable(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_device_state",
return_value='disable'):
result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
self.PCI_SLOT)
self.assertEqual('disable', result)
def test_get_device_status_auto(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_device_state",
return_value='auto'):
result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
self.PCI_SLOT)
self.assertEqual('auto', result)
def test_get_device_status_mismatch(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_device_state",
return_value='enable'):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.LOG.warning") as log_mock:
result = self.eswitch_mgr.get_device_state(self.WRONG_MAC,
self.PCI_SLOT)
log_mock.assert_called_with('device pci mismatch: '
'%(device_mac)s - %(pci_slot)s',
{'pci_slot': self.PCI_SLOT,
'device_mac': self.WRONG_MAC})
self.assertEqual('disable', result)
def test_set_device_status(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.set_device_state"):
self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC,
self.PCI_SLOT, True, False)
def test_set_device_max_rate(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC) as get_pci_mock,\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.set_device_rate")\
as set_device_rate_mock:
self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC,
self.PCI_SLOT, 1000)
get_pci_mock.assert_called_once_with(self.PCI_SLOT)
set_device_rate_mock.assert_called_once_with(
self.PCI_SLOT, self.MAX_RATE, 1000)
def test_set_device_min_tx_rate(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC) as get_pci_mock,\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.set_device_rate")\
as set_device_rate_mock:
self.eswitch_mgr.set_device_min_tx_rate(self.ASSIGNED_MAC,
self.PCI_SLOT, 1000)
get_pci_mock.assert_called_once_with(self.PCI_SLOT)
set_device_rate_mock.assert_called_once_with(
self.PCI_SLOT, self.MIN_RATE, 1000)
def test_set_device_status_mismatch(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.set_device_state"):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.LOG.warning") as log_mock:
self.eswitch_mgr.set_device_state(self.WRONG_MAC,
self.PCI_SLOT, True, False)
log_mock.assert_called_with('device pci mismatch: '
'%(device_mac)s - %(pci_slot)s',
{'pci_slot': self.PCI_SLOT,
'device_mac': self.WRONG_MAC})
def _mock_device_exists(self, pci_slot, mac_address, expected_result):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC):
result = self.eswitch_mgr.device_exists(mac_address,
pci_slot)
self.assertEqual(expected_result, result)
def test_device_exists_true(self):
self._mock_device_exists(self.PCI_SLOT,
self.ASSIGNED_MAC,
True)
def test_device_exists_false(self):
self._mock_device_exists(self.WRONG_PCI,
self.WRONG_MAC,
False)
def test_device_exists_mismatch(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.EmbSwitch.get_pci_device",
return_value=self.ASSIGNED_MAC):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.LOG.warning") as log_mock:
result = self.eswitch_mgr.device_exists(self.WRONG_MAC,
self.PCI_SLOT)
log_mock.assert_called_with('device pci mismatch: '
'%(device_mac)s - %(pci_slot)s',
{'pci_slot': self.PCI_SLOT,
'device_mac': self.WRONG_MAC})
self.assertFalse(result)
def test_clear_max_rate(self):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.'
'eswitch_manager.ESwitchManager._clear_rate') \
as clear_rate_mock:
self.eswitch_mgr.clear_max_rate(self.PCI_SLOT)
clear_rate_mock.assert_called_once_with(self.PCI_SLOT,
self.MAX_RATE)
def test_clear_min_tx_rate(self):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.'
'eswitch_manager.ESwitchManager._clear_rate') \
as clear_rate_mock:
self.eswitch_mgr.clear_min_tx_rate(self.PCI_SLOT)
clear_rate_mock.assert_called_once_with(self.PCI_SLOT,
self.MIN_RATE)
def test_process_emb_switch_without_device(self):
device_mappings = {'physnet1': ['p6p1', 'p6p2']}
phys_net = 'physnet1'
dev_name = 'p6p1'
self._set_eswitch_manager(self.eswitch_mgr, device_mappings)
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=False), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.ESwitchManager._create_emb_switch",
) as emb_switch:
self.eswitch_mgr._process_emb_switch_map(phys_net,
dev_name, {})
self.assertFalse(emb_switch.called)
def test_process_emb_switch_with_device(self):
device_mappings = {'physnet1': ['p6p1', 'p6p2']}
phys_net = 'physnet1'
dev_name = 'p6p3'
self._set_eswitch_manager(self.eswitch_mgr, device_mappings)
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.ESwitchManager._create_emb_switch",
) as emb_switch:
self.eswitch_mgr._process_emb_switch_map(phys_net,
dev_name, {})
self.assertTrue(emb_switch.called)
def _test_clear_rate(self, rate_type, pci_slot, passed, mac_address):
with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.'
'eswitch_manager.EmbSwitch.set_device_rate') \
as set_rate_mock, \
mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.'
'pci_lib.PciDeviceIPWrapper.get_assigned_macs',
return_value=mac_address), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True):
self.eswitch_mgr._clear_rate(pci_slot, rate_type)
if passed:
set_rate_mock.assert_called_once_with(pci_slot, rate_type, 0)
else:
self.assertFalse(set_rate_mock.called)
def test_clear_rate_max_rate_existing_pci_slot(self):
self._test_clear_rate(self.MAX_RATE, self.PCI_SLOT, passed=True,
mac_address={})
def test_clear_rate_max_rate_exist_and_assigned_pci(self):
self._test_clear_rate(self.MAX_RATE, self.PCI_SLOT, passed=False,
mac_address={0: self.ASSIGNED_MAC})
def test_clear_rate_max_rate_nonexisting_pci_slot(self):
self._test_clear_rate(self.MAX_RATE, self.WRONG_PCI, passed=False,
mac_address={})
def test_clear_rate_min_tx_rate_existing_pci_slot(self):
self._test_clear_rate(self.MIN_RATE, self.PCI_SLOT, passed=True,
mac_address={})
def test_clear_rate_min_tx_rate_exist_and_assigned_pci(self):
self._test_clear_rate(self.MIN_RATE, self.PCI_SLOT, passed=False,
mac_address={0: self.ASSIGNED_MAC})
def test_clear_rate_min_tx_rate_nonexisting_pci_slot(self):
self._test_clear_rate(self.MIN_RATE, self.WRONG_PCI, passed=False,
mac_address={})
def test_create_emb_switch(self):
DEVICES = [('0000:04:00.1', 0),
('0000:04:00.2', 1)]
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
side_effect=[[], DEVICES]), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.get_numvfs",
return_value=2):
physnet = 'test_create_emb_switch'
self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map)
# first time device will not be added as no VFs returned
self.eswitch_mgr._create_emb_switch(physnet, 'dev1', [])
self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map)
self.assertEqual({'dev1'}, self.eswitch_mgr.skipped_devices)
# second time device should be added with 2 VFs
self.eswitch_mgr._create_emb_switch(physnet, 'dev1', [])
self.assertIn(physnet, self.eswitch_mgr.emb_switches_map)
self.assertEqual(set(), self.eswitch_mgr.skipped_devices)
self.assertIn('0000:04:00.1', self.eswitch_mgr.pci_slot_map)
self.assertIn('0000:04:00.2', self.eswitch_mgr.pci_slot_map)
def test_create_emb_switch_zero_vfs(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
return_value=[]), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.get_numvfs",
return_value=0):
physnet = 'test_create_emb_switch'
self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map)
# first time device will not be added
self.eswitch_mgr._create_emb_switch(physnet, 'dev1', [])
self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map)
self.assertEqual({'dev1'}, self.eswitch_mgr.skipped_devices)
# second time device should be added with 0 VFs
self.eswitch_mgr._create_emb_switch(physnet, 'dev1', [])
self.assertIn(physnet, self.eswitch_mgr.emb_switches_map)
self.assertEqual(set(), self.eswitch_mgr.skipped_devices)
class TestEmbSwitch(base.BaseTestCase):
DEV_NAME = "eth2"
PHYS_NET = "default"
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = "0000:06:00.1"
WRONG_PCI_SLOT = "0000:06:00.4"
SCANNED_DEVICES = [('0000:06:00.1', 0),
('0000:06:00.2', 1),
('0000:06:00.3', 2)]
VF_TO_MAC_MAPPING = {0: '00:00:00:00:00:11',
1: '00:00:00:00:00:22',
2: '00:00:00:00:00:33'}
EXPECTED_MAC_TO_PCI = {
'00:00:00:00:00:11': '0000:06:00.1',
'00:00:00:00:00:22': '0000:06:00.2',
'00:00:00:00:00:33': '0000:06:00.3'}
def setUp(self):
super(TestEmbSwitch, self).setUp()
exclude_devices = set()
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.scan_vf_devices",
return_value=self.SCANNED_DEVICES):
self.emb_switch = esm.EmbSwitch(self.DEV_NAME, exclude_devices)
self.mock_get_vfs = mock.patch.object(esm.EmbSwitch,
'_get_vfs').start()
self.vf_rates = [{esm.IP_LINK_CAPABILITY_RATE: 500,
esm.IP_LINK_CAPABILITY_MIN_TX_RATE: 250}]
self.mock_get_vfs.return_value = self.vf_rates
def test_get_assigned_devices_info(self):
with mock.patch.object(pci_lib.PciDeviceIPWrapper, 'get_assigned_macs',
return_value={0: self.ASSIGNED_MAC}), \
mock.patch.object(esm.PciOsWrapper, 'pf_device_exists',
return_value=True), \
mock.patch.object(esm.PciOsWrapper, 'is_assigned_vf_direct',
return_value=True):
result = self.emb_switch.get_assigned_devices_info()
device_info = agent_rpc.DeviceInfo(self.ASSIGNED_MAC,
self.PCI_SLOT)
self.assertEqual(1, len(result))
self.assertEqual(device_info, result[0])
def test_get_assigned_devices_info_multiple_slots(self):
with mock.patch.object(pci_lib.PciDeviceIPWrapper, 'get_assigned_macs',
return_value=self.VF_TO_MAC_MAPPING), \
mock.patch.object(esm.PciOsWrapper, 'pf_device_exists',
return_value=True), \
mock.patch.object(esm.PciOsWrapper, 'is_assigned_vf_direct',
return_value=True):
devices_info = self.emb_switch.get_assigned_devices_info()
for device_info in devices_info:
self.assertEqual(self.EXPECTED_MAC_TO_PCI[device_info.mac],
device_info.pci_slot)
def test_get_assigned_devices_empty(self):
with mock.patch.object(esm.PciOsWrapper, 'is_assigned_vf_direct',
return_value=False), \
mock.patch.object(esm.PciOsWrapper, 'is_assigned_vf_macvtap',
return_value=False):
result = self.emb_switch.get_assigned_devices_info()
self.assertEqual([], result)
def test_get_device_state_ok(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.get_vf_state",
return_value=False):
result = self.emb_switch.get_device_state(self.PCI_SLOT)
self.assertFalse(result)
def test_get_device_state_fail(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.get_vf_state",
return_value=False):
self.assertRaises(exc.InvalidPciSlotError,
self.emb_switch.get_device_state,
self.WRONG_PCI_SLOT)
def test_set_device_state_ok(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.set_vf_state"):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"pci_lib.LOG.warning") as log_mock:
self.emb_switch.set_device_state(self.PCI_SLOT, True, False)
self.assertEqual(0, log_mock.call_count)
def test_set_device_state_fail(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.set_vf_state"):
self.assertRaises(exc.InvalidPciSlotError,
self.emb_switch.set_device_state,
self.WRONG_PCI_SLOT, True, False)
def test_set_device_spoofcheck_ok(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.set_vf_spoofcheck") as \
set_vf_spoofcheck_mock:
self.emb_switch.set_device_spoofcheck(self.PCI_SLOT, True)
self.assertTrue(set_vf_spoofcheck_mock.called)
def test_set_device_spoofcheck_fail(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.set_vf_spoofcheck"):
self.assertRaises(exc.InvalidPciSlotError,
self.emb_switch.set_device_spoofcheck,
self.WRONG_PCI_SLOT, True)
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_rate_ok(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 2000})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 2
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
# No 'min_tx_rate' support
vf_rates = [{esm.IP_LINK_CAPABILITY_RATE: 500,
esm.IP_LINK_CAPABILITY_MIN_TX_RATE: None}]
self.mock_get_vfs.return_value = vf_rates
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 2000})
vf_rates[0] = {esm.IP_LINK_CAPABILITY_RATE: 2}
mock_set_vf_rate.assert_called_with(0, vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_ok2(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 99})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 1
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_rounded_ok(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 2001})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 2
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_rounded_ok2(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 2499})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 2
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_rounded_ok3(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 2500})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 3
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_disable(self, mock_set_vf_rate):
self.emb_switch.set_device_rate(
self.PCI_SLOT, {esm.IP_LINK_CAPABILITY_RATE: 0})
self.vf_rates[0][esm.IP_LINK_CAPABILITY_RATE] = 0
mock_set_vf_rate.assert_called_with(0, self.vf_rates[0])
@mock.patch.object(pci_lib.PciDeviceIPWrapper, 'set_vf_rate')
def test_set_device_max_rate_fail(self, *args):
self.assertRaises(
exc.InvalidPciSlotError,
self.emb_switch.set_device_rate,
self.WRONG_PCI_SLOT,
{esm.IP_LINK_CAPABILITY_RATE: 1000})
def test_get_pci_device(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.get_assigned_macs",
return_value={0: self.ASSIGNED_MAC}),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper."
"is_assigned_vf_direct", return_value=True), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True):
result = self.emb_switch.get_pci_device(self.PCI_SLOT)
self.assertEqual(self.ASSIGNED_MAC, result)
def test_get_pci_device_fail(self):
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper.get_assigned_macs",
return_value=[self.ASSIGNED_MAC]),\
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper.pf_device_exists",
return_value=True), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper."
"is_assigned_vf_direct", return_value=True):
result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT)
self.assertIsNone(result)
def test_get_pci_list(self):
result = self.emb_switch.get_pci_slot_list()
self.assertEqual([tup[0] for tup in self.SCANNED_DEVICES],
sorted(result))
def _test__get_macvtap_mac(self, upper_devs):
ip_wrapper_mock_inst = mock.MagicMock()
with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
"PciDeviceIPWrapper",
return_value=ip_wrapper_mock_inst), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.PciOsWrapper."
"get_vf_macvtap_upper_devs",
return_value=upper_devs), \
mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
"eswitch_manager.LOG.warning") as log_mock:
self.emb_switch._get_macvtap_mac(0)
ip_wrapper_mock_inst.device.assert_called_with(upper_devs[0])
if len(upper_devs) > 1:
self.assertTrue(log_mock.called)
else:
self.assertFalse(log_mock.called)
def test__get_macvtap_mac_single_upper_dev(self):
upper_devs = ["macvtap0"]
self._test__get_macvtap_mac(upper_devs)
def test__get_macvtap_mac_multiple_upper_devs(self):
upper_devs = ["macvtap0", "macvtap1"]
self._test__get_macvtap_mac(upper_devs)
class TestPciOsWrapper(base.BaseTestCase):
DEV_NAME = "p7p1"
VF_INDEX = 1
DIR_CONTENTS = [
"mlx4_port1",
"virtfn0",
"virtfn1",
"virtfn2"
]
DIR_CONTENTS_NO_MATCH = [
"mlx4_port1",
"mlx4_port1"
]
LINKS = {
"virtfn0": "../0000:04:00.1",
"virtfn1": "../0000:04:00.2",
"virtfn2": "../0000:04:00.3"
}
PCI_SLOTS = [
('0000:04:00.1', 0),
('0000:04:00.2', 1),
('0000:04:00.3', 2)
]
def test_scan_vf_devices(self):
def _get_link(file_path):
file_name = os.path.basename(file_path)
return self.LINKS[file_name]
with mock.patch("os.path.isdir", return_value=True),\
mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\
mock.patch("os.path.islink", return_value=True),\
mock.patch("os.readlink", side_effect=_get_link):
result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)
self.assertEqual(self.PCI_SLOTS, result)
def test_scan_vf_devices_no_dir(self):
with mock.patch("os.path.isdir", return_value=False):
self.assertRaises(exc.InvalidDeviceError,
esm.PciOsWrapper.scan_vf_devices,
self.DEV_NAME)
def test_scan_vf_devices_no_content(self):
with mock.patch("os.path.isdir", return_value=True),\
mock.patch("os.listdir", return_value=[]):
self.assertEqual([],
esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME))
def test_scan_vf_devices_no_match(self):
with mock.patch("os.path.isdir", return_value=True),\
mock.patch("os.listdir",
return_value=self.DIR_CONTENTS_NO_MATCH):
self.assertEqual([],
esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME))
def _mock_assign_vf_direct(self, dir_exists):
with mock.patch("os.path.isdir",
return_value=dir_exists):
result = esm.PciOsWrapper.is_assigned_vf_direct(self.DEV_NAME,
self.VF_INDEX)
self.assertEqual(not dir_exists, result)
def test_is_assigned_vf_direct_true(self):
self._mock_assign_vf_direct(True)
def test_is_assigned_vf_direct_false(self):
self._mock_assign_vf_direct(False)
def _mock_assign_vf_macvtap(self, macvtap_exists):
def _glob(file_path):
return ["upper_macvtap0"] if macvtap_exists else []
with mock.patch("glob.glob", side_effect=_glob):
result = esm.PciOsWrapper.is_assigned_vf_macvtap(self.DEV_NAME,
self.VF_INDEX)
self.assertEqual(macvtap_exists, result)
def test_is_assigned_vf_macvtap_true(self):
self._mock_assign_vf_macvtap(True)
def test_is_assigned_vf_macvtap_false(self):
self._mock_assign_vf_macvtap(False)
def _test_get_vf_macvtap_upper_devs(self, upper_devs):
with mock.patch("glob.glob", return_value=upper_devs):
result = esm.PciOsWrapper.get_vf_macvtap_upper_devs(self.DEV_NAME,
self.VF_INDEX)
self.assertEqual([dev.split("_")[1] for dev in upper_devs], result)
def test_get_vf_macvtap_upper_devs(self):
upper_devs = ["upper_macvtap0", "upper_macvtap1"]
self._test_get_vf_macvtap_upper_devs(upper_devs)
def test_get_vf_macvtap_upper_devs_no_devs(self):
upper_devs = []
self._test_get_vf_macvtap_upper_devs(upper_devs)
def test_pf_device_exists_with_no_dir(self):
with mock.patch("os.path.isdir", return_value=False):
self.assertFalse(esm.PciOsWrapper.pf_device_exists('p6p1'))
def test_pf_device_exists_with_dir(self):
with mock.patch("os.path.isdir", return_value=True):
self.assertTrue(esm.PciOsWrapper.pf_device_exists('p6p1'))
def test_get_numvfs(self):
with mock.patch("builtins.open",
mock.mock_open(read_data="63")) as mock_open:
self.assertEqual(63, esm.PciOsWrapper.get_numvfs('dev1'))
mock_open.assert_called_once_with(
esm.PciOsWrapper.NUMVFS_PATH % 'dev1')
def test_get_numvfs_no_file(self):
with mock.patch("builtins.open", side_effect=IOError()) as mock_open:
self.assertEqual(-1, esm.PciOsWrapper.get_numvfs('dev1'))
mock_open.assert_called_once_with(
esm.PciOsWrapper.NUMVFS_PATH % 'dev1')
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A demo for on-device backprop (transfer learning) of a classification model.
This demo runs a task similar to the one described in the TF Poets tutorial, except that
learning happens on-device.
https://codelabs.developers.google.com/codelabs/tensorflow-for-poets/#0
Here are the steps:
1) mkdir -p /tmp/retrain/
2) curl http://download.tensorflow.org/example_images/flower_photos.tgz \
| tar xz -C /tmp/retrain
3) Start training:
python3 backprop_last_layer.py \
--data_dir /tmp/retrain/flower_photos \
--embedding_extractor_path \
models/mobilenet_v1_1.0_224_quant_embedding_extractor_edgetpu.tflite
Weights for the retrained last layer will be saved to /tmp/retrain/output by
default.
4) Run an inference with the new model:
python3 classify_image.py \
--model /tmp/retrain/output/retrained_model_edgetpu.tflite \
--label /tmp/retrain/output/label_map.txt \
--image images/sunflower.bmp
For more information, see
https://coral.ai/docs/edgetpu/retrain-classification-ondevice-backprop/
"""
import argparse
import contextlib
import os
import sys
import time
from edgetpu.basic import basic_engine
from edgetpu.learn.backprop.softmax_regression import SoftmaxRegression
import numpy as np
from PIL import Image
@contextlib.contextmanager
def test_image(path):
"""Returns opened test image."""
with open(path, 'rb') as f:
with Image.open(f) as image:
yield image
def save_label_map(label_map, out_path):
"""Saves label map to a file."""
with open(out_path, 'w') as f:
for key, val in label_map.items():
f.write('%s %s\n' % (key, val))
def get_image_paths(data_dir):
"""Walks through data_dir and returns list of image paths and label map.
Args:
data_dir: string, path to the data directory, which is assumed to be
organized as:
- [CLASS_NAME_0]
-- image_class_0_a.jpg
-- image_class_0_b.jpg
-- ...
- [CLASS_NAME_1]
-- image_class_1_a.jpg
-- ...
Returns:
A tuple of (image_paths, labels, label_map)
image_paths: list of string, represents image paths
labels: list of int, represents labels
label_map: a dictionary (int -> string), e.g., 0->class0, 1->class1, etc.
"""
classes = None
image_paths = []
labels = []
class_idx = 0
for root, dirs, files in os.walk(data_dir):
if root == data_dir:
# Each sub-directory in `data_dir`
classes = dirs
else:
# Read each sub-directory
assert classes[class_idx] in root
print('Reading dir: %s, which has %d images' % (root, len(files)))
for img_name in files:
image_paths.append(os.path.join(root, img_name))
labels.append(class_idx)
class_idx += 1
return image_paths, labels, dict(zip(range(class_idx), classes))
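# Illustrative note (the directory names below are assumptions, not part of
# the demo data): if data_dir contains sub-directories 'daisy' and 'roses',
# walked in that order, image_paths lists every file under both, labels holds
# 0 for the 'daisy' images and 1 for the 'roses' images, and the returned
# label map is {0: 'daisy', 1: 'roses'}.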
def shuffle_and_split(image_paths, labels, val_percent=0.1, test_percent=0.1):
"""Shuffles and splits data into train, validation, and test sets.
Args:
image_paths: list of string, of dim num_data
labels: list of int of length num_data
val_percent: validation data set percentage.
test_percent: test data set percentage.
Returns:
Two dictionaries (train_and_val_dataset, test_dataset).
train_and_val_dataset has the following fields.
'data_train': data_train
'labels_train': labels_train
'data_val': data_val
'labels_val': labels_val
test_dataset has the following fields.
'data_test': data_test
'labels_test': labels_test
"""
image_paths = np.array(image_paths)
labels = np.array(labels)
perm = np.random.permutation(image_paths.shape[0])
image_paths = image_paths[perm]
labels = labels[perm]
num_total = image_paths.shape[0]
num_val = int(num_total * val_percent)
num_test = int(num_total * test_percent)
num_train = num_total - num_val - num_test
train_and_val_dataset = {}
train_and_val_dataset['data_train'] = image_paths[0:num_train]
train_and_val_dataset['labels_train'] = labels[0:num_train]
train_and_val_dataset['data_val'] = image_paths[num_train:num_train + num_val]
train_and_val_dataset['labels_val'] = labels[num_train:num_train + num_val]
test_dataset = {}
test_dataset['data_test'] = image_paths[num_train + num_val:]
test_dataset['labels_test'] = labels[num_train + num_val:]
return train_and_val_dataset, test_dataset
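# Worked example of the split sizes (a sketch, assuming 100 input images and
# the default val_percent=0.1 and test_percent=0.1): num_val = 10,
# num_test = 10, num_train = 100 - 10 - 10 = 80, so train_and_val_dataset
# holds 80 training and 10 validation paths and test_dataset holds the
# remaining 10.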
def extract_embeddings(image_paths, engine):
"""Uses model to process images as embeddings.
Reads image, resizes and feeds to model to get feature embeddings. Original
image is discarded to keep maximum memory consumption low.
Args:
image_paths: ndarray, represents a list of image paths.
engine: BasicEngine, wraps embedding extractor model.
Returns:
ndarray of shape (number of images, feature_dim) with one embedding per image.
"""
_, input_height, input_width, _ = engine.get_input_tensor_shape()
assert engine.get_num_of_output_tensors() == 1
feature_dim = engine.get_output_tensor_size(0)
embeddings = np.empty((len(image_paths), feature_dim), dtype=np.float32)
for idx, path in enumerate(image_paths):
with test_image(path) as img:
img = img.resize((input_width, input_height), Image.NEAREST)
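# run_inference returns a pair; the first element (the inference latency,
# per the Edge TPU BasicEngine API) is discarded and the second, the
# flattened output tensor, is stored as this image's embedding.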
_, embeddings[idx, :] = engine.run_inference(np.asarray(img).flatten())
return embeddings
def train(model_path, data_dir, output_dir):
"""Trains a softmax regression model given data and embedding extractor.
Args:
model_path: string, path to embedding extractor.
data_dir: string, directory that contains training data.
output_dir: string, directory to save retrained tflite model and label map.
"""
t0 = time.perf_counter()
image_paths, labels, label_map = get_image_paths(data_dir)
train_and_val_dataset, test_dataset = shuffle_and_split(image_paths, labels)
# Initialize the basic engine model once here to avoid repeated
# initialization, which is time-consuming.
engine = basic_engine.BasicEngine(model_path)
print('Extract embeddings for data_train')
train_and_val_dataset['data_train'] = extract_embeddings(
train_and_val_dataset['data_train'], engine)
print('Extract embeddings for data_val')
train_and_val_dataset['data_val'] = extract_embeddings(
train_and_val_dataset['data_val'], engine)
t1 = time.perf_counter()
print('Data preprocessing takes %.2f seconds' % (t1 - t0))
# Construct model and start training
weight_scale = 5e-2
reg = 0.0
feature_dim = train_and_val_dataset['data_train'].shape[1]
num_classes = np.max(train_and_val_dataset['labels_train']) + 1
model = SoftmaxRegression(
feature_dim, num_classes, weight_scale=weight_scale, reg=reg)
learning_rate = 1e-2
batch_size = 100
num_iter = 500
model.train_with_sgd(
train_and_val_dataset, num_iter, learning_rate, batch_size=batch_size)
t2 = time.perf_counter()
print('Training takes %.2f seconds' % (t2 - t1))
# Append learned weights to input model and save as tflite format.
out_model_path = os.path.join(output_dir, 'retrained_model_edgetpu.tflite')
model.save_as_tflite_model(model_path, out_model_path)
print('Model %s saved.' % out_model_path)
label_map_path = os.path.join(output_dir, 'label_map.txt')
save_label_map(label_map, label_map_path)
print('Label map %s saved.' % label_map_path)
t3 = time.perf_counter()
print('Saving retrained model and label map takes %.2f seconds' % (t3 - t2))
retrained_engine = basic_engine.BasicEngine(out_model_path)
test_embeddings = extract_embeddings(test_dataset['data_test'],
retrained_engine)
saved_model_acc = np.mean(
np.argmax(test_embeddings, axis=1) == test_dataset['labels_test'])
print('Saved tflite model test accuracy: %.2f%%' % (saved_model_acc * 100))
t4 = time.perf_counter()
print('Checking test accuracy takes %.2f seconds' % (t4 - t3))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--embedding_extractor_path',
required=True,
help='Path to embedding extractor tflite model.')
parser.add_argument('--data_dir', required=True, help='Directory to data.')
parser.add_argument(
'--output_dir',
default='/tmp/retrain/output',
help='Path to directory to save retrained model and label map.')
args = parser.parse_args()
if not os.path.exists(args.data_dir):
sys.exit('%s does not exist!' % args.data_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
train(args.embedding_extractor_path, args.data_dir, args.output_dir)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import curses
import libvirt
import threading
import time
from ovm.lvconnect import LibvirtConnect
from ovm.utils.printer import si_unit
UPDATE_DATA_INTERVAL = 1
REFRESH_INTERVAL = 0.5
SORT_NAME, SORT_CPU, SORT_MEM = 0, 1, 3
class DomainStats:
def __init__(self, domain, host_stats):
self.domain = domain
self.name = domain.name()
self.host_stats = host_stats
# CPU
self.cpu_usage = 0
self.cpu_time = 0
# Memory
self.host_mem = self.guest_mem = 0
# Network
self.net_rx_bytes = self.net_tx_bytes = 0
self.net_rx_rate = self.net_tx_rate = 0
# Storage
self.block_rd_bytes = self.block_wr_bytes = 0
self.block_rd_rate = self.block_wr_rate = 0
@staticmethod
def compute_cpu_usage(prev, cur, cpu_count):
return min(
(cur - prev) / (UPDATE_DATA_INTERVAL * cpu_count * 10**7),
100
)
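# Worked example (hypothetical numbers, not from a real domain): on a host
# with cpu_count=4, if the averaged vCPU time advances by 2 * 10**8 ns over
# one UPDATE_DATA_INTERVAL, the usage is
# (2 * 10**8) / (1 * 4 * 10**7) = 5, i.e. about 5% of the host's CPU.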
def update_cpu(self, stats):
previous_cpu_time = self.cpu_time
domain_cpu_count = stats.get('vcpu.current', 1)
sum_time = 0
for i in range(domain_cpu_count):
sum_time += stats.get('vcpu.%d.time' % i, 0)
current_cpu_time = sum_time / domain_cpu_count
if previous_cpu_time > 0:
self.cpu_usage = self.compute_cpu_usage(
previous_cpu_time, current_cpu_time, self.host_stats.cpu_count
)
self.cpu_time = current_cpu_time
def update_memory(self, stats):
# Current memory allocated on the host
self.host_mem = self.domain.memoryStats().get('rss', 0) * 1024
# guest current max memory
self.guest_mem = stats.get('balloon.maximum', 0) * 1024
def update_network(self, stats):
current_rx_bytes = stats.get('net.0.rx.bytes', 0)
current_tx_bytes = stats.get('net.0.tx.bytes', 0)
previous_rx_bytes = self.net_rx_bytes
previous_tx_bytes = self.net_tx_bytes
if previous_rx_bytes > 0:
self.net_rx_rate = (
(current_rx_bytes - previous_rx_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
if previous_tx_bytes > 0:
self.net_tx_rate = (
(current_tx_bytes - previous_tx_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
self.net_rx_bytes = current_rx_bytes
self.net_tx_bytes = current_tx_bytes
def update_storage(self, stats):
current_rd_bytes = stats.get('block.0.rd.bytes', 0)
current_wr_bytes = stats.get('block.0.wr.bytes', 0)
previous_rd_bytes = self.block_rd_bytes
previous_wr_bytes = self.block_wr_bytes
if previous_rd_bytes > 0:
self.block_rd_rate = (
(current_rd_bytes - previous_rd_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
if previous_wr_bytes > 0:
self.block_wr_rate = (
(current_wr_bytes - previous_wr_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
self.block_rd_bytes = current_rd_bytes
self.block_wr_bytes = current_wr_bytes
def update(self, stats):
for name in ('cpu', 'memory', 'network', 'storage'):
getattr(self, 'update_%s' % name)(stats)
def format(self, pattern):
stats = {
'name': self.name,
'cpu_usage': round(self.cpu_usage),
'guest_mem': si_unit(self.guest_mem, True) + 'B',
'host_mem': si_unit(self.host_mem, True) + 'B',
'net_rx': '{0}bps'.format(si_unit(self.net_rx_rate)),
'net_tx': '{0}bps'.format(si_unit(self.net_tx_rate)),
'block_rd': '{0}bps'.format(si_unit(self.block_rd_rate)),
'block_wr': '{0}bps'.format(si_unit(self.block_wr_rate))
}
return pattern.format(**stats)
class HostStats:
def __init__(self, connection):
self._connection = connection
self.hostname = connection.getHostname()
host_info = connection.getInfo()
self.cpu_count = host_info[2]
self.cpu_freq = host_info[3] * (10**6)
self.cpu_time = 0
self.cpu_usage = 0
self.mem_vms_total = 0
self.mem_os = 0
self.mem_total = 0
self.mem_cached = 0
self.domain_count = 0
def update(self, total_mem_domain, domain_count):
self.domain_count = domain_count
host_info = self._connection.getInfo()
self.cpu_freq = host_info[3] * (10**6)
cpu_stats = self._connection.getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS)
cpu_time = sum((cpu_stats[k] for k in ('kernel', 'user', 'iowait'))) \
/ self.cpu_count
if self.cpu_time > 0:
self.cpu_usage = min(1, ((cpu_time - self.cpu_time)
/ (UPDATE_DATA_INTERVAL * 10**9)))
self.cpu_time = cpu_time
mem_stats = self._connection.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS
)
self.mem_total = mem_stats['total'] * 1024
self.mem_vms_total = total_mem_domain
self.mem_os = ((mem_stats['total'] - mem_stats['free']
- mem_stats['cached']
- mem_stats['buffers']) * 1024
- total_mem_domain)
self.mem_cached = (mem_stats['cached'] - mem_stats['buffers']) * 1024
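# Reading note on the figures above (a summary, not extra logic): cpu_usage
# compares successive cumulative kernel + user + iowait times, in
# nanoseconds per CPU, against the 10**9 ns available in one interval;
# mem_os approximates memory used by the host OS itself, i.e. total minus
# free, cached and buffered pages (converted from KiB to bytes) minus the
# RSS already attributed to running domains; mem_cached keeps the page
# cache without buffers.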
class VMTop:
def __init__(self):
self._domains = {}
self.libvirt_conn = LibvirtConnect.get_connection()
self._sort_on = SORT_NAME
self.host_stats = HostStats(self.libvirt_conn)
self.screen = curses.initscr()
self.init_terminal()
# Init colors
colors = (
('TABLE_HEADER', curses.COLOR_BLACK, curses.COLOR_GREEN),
('TABLE_HEADER_SELECTED', curses.COLOR_BLACK, curses.COLOR_CYAN),
('RED_ON_BLACK', curses.COLOR_RED, curses.COLOR_BLACK),
('GREEN_ON_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK),
('CYAN_ON_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK),
('BLACK_ON_CYAN', curses.COLOR_BLACK, curses.COLOR_CYAN),
('YELLOW_ON_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK)
)
for i, color in enumerate(colors, 1):
name, fg, bg = color
curses.init_pair(i, fg, bg)
setattr(self, name, curses.color_pair(i))
try:
self.main()
finally:
self.reset_terminal()
def main(self):
refresh_thread = threading.Thread(target=self.refresh)
refresh_thread.daemon = True
refresh_thread.start()
update_data_thread = threading.Thread(target=self.update_data)
update_data_thread.daemon = True
update_data_thread.start()
while True:
event = self.screen.getch()
if event == ord('c'):
self._sort_on = SORT_CPU
elif event == ord('n'):
self._sort_on = SORT_NAME
elif event == ord('m'):
self._sort_on = SORT_MEM
elif event == ord('q'):
break
def init_terminal(self):
curses.start_color()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.screen.keypad(1)
self.screen.clear()
self.screen.refresh()
def reset_terminal(self):
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin()
def update_data(self):
while True:
self._update_data()
time.sleep(UPDATE_DATA_INTERVAL)
def _update_data(self):
total_mem_domain = 0
domains = self.libvirt_conn.getAllDomainStats(
flags=libvirt.VIR_CONNECT_GET_ALL_DOMAINS_STATS_RUNNING
)
current_domains = set()
for domain, libvirt_stats in domains:
name = domain.name()
current_domains.add(name)
if name not in self._domains:
self._domains[name] = DomainStats(domain, self.host_stats)
self._domains[name].update(libvirt_stats)
total_mem_domain += self._domains[name].host_mem
# Delete all domains not active in domain stats list
deleted_domains = set(self._domains.keys()) - current_domains
for name in deleted_domains:
self._domains.pop(name)
domain_count = len(current_domains)
self.host_stats.update(total_mem_domain, domain_count)
def draw_host_bar(self, line):
style = self.CYAN_ON_BLACK
bar_format = ' :: '.join((
'{hostname}',
'CPU: {cpu_count} ({cpu_freq} MHz)',
'Memory: {mem_total}iB',
'Domains: {domain_count}'
))
text = bar_format.format(
hostname=self.host_stats.hostname,
cpu_count=self.host_stats.cpu_count,
cpu_freq=int(self.host_stats.cpu_freq / 10**6),
mem_total=si_unit(self.host_stats.mem_total),
domain_count=self.host_stats.domain_count
)
self.screen.addstr(line, 0, text, style)
self.screen.clrtoeol()
def draw_cpu_bar(self, line):
# Some params
bar_graph_width = 40
# Initialize the line
self.screen.move(line, 0)
self.screen.clrtoeol()
# Show 'CPU'
self.screen.move(line, 1)
self.screen.addstr('CPU', self.CYAN_ON_BLACK)
# Print the left side of the bar graph
self.screen.addstr(' [')
# Print the CPU usage portion of the bar graph
pipe_count = int(round(self.host_stats.cpu_usage * bar_graph_width))
self.screen.addstr('|' * pipe_count, self.RED_ON_BLACK)
# Print the right side of the bar graph
_, x = self.screen.getyx()
self.screen.move(line, x + bar_graph_width - pipe_count)
self.screen.addstr('] ')
self.screen.addstr('{0} %'.format(
round(self.host_stats.cpu_usage * 100)))
def draw_memory_bar(self, line):
current_bar_size = 0
# Some params
bar_graph_width = 40
# Initialize the line
self.screen.move(line, 0)
self.screen.clrtoeol()
# Show 'Mem'
self.screen.move(line, 1)
self.screen.addstr('Mem', self.CYAN_ON_BLACK)
# Print the left side of the bar graph
self.screen.addstr(' [')
# Print the memory taken by the OS
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_os / self.host_stats.mem_total
mem_os_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_os_size, self.RED_ON_BLACK)
current_bar_size += mem_os_size
# Print the memory taken by VMs
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_vms_total / self.host_stats.mem_total
mem_vms_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_vms_size, self.GREEN_ON_BLACK)
current_bar_size += mem_vms_size
# Print the memory cached
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_cached / self.host_stats.mem_total
mem_cached_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_cached_size, self.YELLOW_ON_BLACK)
current_bar_size += mem_cached_size
# Print the right side of the bar graph
_, x = self.screen.getyx()
self.screen.move(line, x + bar_graph_width - current_bar_size)
self.screen.addstr('] ')
# Print the text aside
self.screen.addstr(
'{0}B'.format(si_unit(self.host_stats.mem_os, True)),
self.RED_ON_BLACK
)
self.screen.addstr(' / ')
self.screen.addstr(
'{0}B'.format(si_unit(self.host_stats.mem_vms_total, True)),
self.GREEN_ON_BLACK
)
self.screen.addstr(
' / {0}B'.format(si_unit(self.host_stats.mem_total, True))
)
def draw_domains(self, line):
# Initialize columns
TABLES_COLS = (
'{name:15}', '{cpu_usage:>8}', '{guest_mem:>10}', '{host_mem:>10}',
'{net_rx:>10}', '{net_tx:>10}', '{block_rd:>10}', '{block_wr:>10}'
)
# Prepare table header
COLS_NAME = dict(
name='NAME', cpu_usage='%CPU', guest_mem='MEM',
host_mem='HOST MEM', net_rx='NET RX', net_tx='NET TX',
block_rd='BLK RD', block_wr='BLK WR')
# Draw the header
self.screen.move(line, 0)
for i, pattern in enumerate(TABLES_COLS):
if self._sort_on == i:
color = self.TABLE_HEADER_SELECTED
else:
color = self.TABLE_HEADER
text = pattern.format(**COLS_NAME)
self.screen.addstr(text, color)
self.screen.addstr(
' '*(self.screen.getmaxyx()[1] - self.screen.getyx()[1]),
self.TABLE_HEADER)
domains = list(self._domains.values())
domains.sort(key=lambda dom: dom.name)
if self._sort_on == SORT_CPU:
domains.sort(key=lambda dom: dom.cpu_usage, reverse=True)
elif self._sort_on == SORT_MEM:
domains.sort(key=lambda dom: dom.host_mem, reverse=True)
for domain in domains:
self.screen.addstr(domain.format(''.join(TABLES_COLS)))
self.screen.clrtoeol()
self.screen.addch('\n')
self.screen.clrtobot()
def refresh_interface(self):
self.draw_host_bar(0)
self.draw_cpu_bar(2)
self.draw_memory_bar(3)
self.draw_domains(5)
self.screen.refresh()
def refresh(self):
while True:
try:
self.refresh_interface()
except curses.error:
pass
finally:
time.sleep(REFRESH_INTERVAL)
if __name__ == '__main__':
VMTop()
|
|
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create e2e test definitions.
Usage example:
In $GOPATH/src/k8s.io/test-infra,
$ bazel run //releng:generate_tests -- \
--yaml-config-path=releng/test_config.yaml \
"""
import argparse
import hashlib
import os
import ruamel.yaml as yaml
# TODO(yguo0905): Generate Prow and testgrid configurations.
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT!
interval:
cron:
labels:
preset-service-account: "true"
preset-k8s-ssh: "true"
name:
spec:
containers:
- args:
env:
image: gcr.io/k8s-testimages/kubekins-e2e:v20210426-51fd28e-master
resources:
requests:
cpu: 1000m
memory: 3Gi
limits:
cpu: 1000m
memory: 3Gi
"""
E2E_TESTGRID_CONFIG_TEMPLATE = """
name:
gcs_prefix:
column_header:
- configuration_value: node_os_image
- configuration_value: master_os_image
- configuration_value: Commit
- configuration_value: infra-commit
"""
GCS_LOG_PREFIX = "kubernetes-jenkins/logs/"
COMMENT = 'AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT.'
def get_sha1_hash(data):
"""Returns the SHA1 hash of the specified data."""
sha1_hash = hashlib.sha1()
sha1_hash.update(data.encode('utf-8'))
return sha1_hash.hexdigest()
def substitute(job_name, lines):
"""Replace '${job_name_hash}' in lines with the SHA1 hash of job_name."""
return [line.replace('${job_name_hash}', get_sha1_hash(job_name)[:10]) \
for line in lines]
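# Hedged example (the job name and flag are hypothetical, not from the real
# config): for a job named 'ci-foo', get_sha1_hash('ci-foo')[:10] is a
# stable 10-character hex prefix, so
# substitute('ci-foo', ['--cluster=gke-${job_name_hash}'])
# returns ['--cluster=gke-<prefix>'], giving each generated job a unique
# but reproducible substitution.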
def get_args(job_name, field):
"""Returns a list of args for the given field."""
if not field:
return []
return substitute(job_name, field.get('args', []))
def write_prow_configs_file(output_file, job_defs):
"""Writes the Prow configurations into output_file."""
with open(output_file, 'w') as fp:
yaml.dump(
job_defs, fp, Dumper=yaml.RoundTripDumper, width=float("inf"))
def write_testgrid_config_file(output_file, testgrid_config):
"""Writes the TestGrid test group configurations into output_file."""
with open(output_file, 'w') as fp:
fp.write('# ' + COMMENT + '\n\n')
yaml.dump(
testgrid_config, fp, Dumper=yaml.RoundTripDumper, width=float("inf"))
def apply_job_overrides(envs_or_args, job_envs_or_args):
'''Applies the env or arg overrides defined at the job level.'''
original_envs_or_args = envs_or_args[:]
for job_env_or_arg in job_envs_or_args:
name = job_env_or_arg.split('=', 1)[0]
env_or_arg = next(
(x for x in original_envs_or_args if (x.strip().startswith('%s=' % name) or
x.strip() == name)), None)
if env_or_arg:
envs_or_args.remove(env_or_arg)
envs_or_args.append(job_env_or_arg)
class E2ENodeTest:
def __init__(self, job_name, job, config):
self.job_name = job_name
self.job = job
self.common = config['nodeCommon']
self.images = config['nodeImages']
self.k8s_versions = config['nodeK8sVersions']
self.test_suites = config['nodeTestSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite, k8s_version):
"""Returns the Prow config for the job from the given fields."""
prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
# pull interval or cron from job
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
elif 'cron' in self.job:
del prow_config['interval']
prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
# Assumes that the value in --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
if not container['env']:
container['env'] = []
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
container['args'].extend(k8s_version.get('args', []))
container['args'].append('--root=/go/src')
container['env'].extend([{'name':'GOPATH', 'value': '/go'}])
# Specify the appropriate kubekins-e2e image. This allows us to use a
# specific image (containing a particular Go version) to build and
# trigger the node e2e test to avoid issues like
# https://github.com/kubernetes/kubernetes/issues/43534.
if k8s_version.get('prowImage', None):
container['image'] = k8s_version['prowImage']
return prow_config
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
fields = self.job_name.split('-')
if len(fields) != 6:
raise ValueError('Expected 6 fields in job name', self.job_name)
image = self.images[fields[3]]
k8s_version = self.k8s_versions[fields[4][3:]]
test_suite = self.test_suites[fields[5]]
# envs are disallowed in node e2e tests.
if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
raise ValueError(
'envs are disallowed in node e2e test', self.job_name)
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates prow config.
prow_config = self.__get_prow_config(test_suite, k8s_version)
# Combine --node-args
node_args = []
job_args = []
for arg in job_config['args']:
if '--node-args=' in arg:
node_args.append(arg.split('=', 1)[1])
else:
job_args.append(arg)
if node_args:
flag = '--node-args='
for node_arg in node_args:
flag += '%s ' % node_arg
job_args.append(flag.strip())
job_config['args'] = job_args
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[3],
fields[4])
annotations = prow_config.setdefault('annotations', {})
annotations['testgrid-dashboards'] = dashboard
tab_name = '%s-%s-%s' % (fields[3], fields[4], fields[5])
annotations['testgrid-tab-name'] = tab_name
return job_config, prow_config, None
class E2ETest:
def __init__(self, output_dir, job_name, job, config):
self.env_filename = os.path.join(output_dir, '%s.env' % job_name)
self.job_name = job_name
self.job = job
self.common = config['common']
self.cloud_providers = config['cloudProviders']
self.images = config['images']
self.k8s_versions = config['k8sVersions']
self.test_suites = config['testSuites']
def __get_job_def(self, args):
"""Returns the job definition from the given args."""
return {
'scenario': 'kubernetes_e2e',
'args': args,
'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': COMMENT,
}
def __get_prow_config(self, test_suite):
"""Returns the Prow config for the e2e job from the given fields."""
prow_config = yaml.round_trip_load(PROW_CONFIG_TEMPLATE)
prow_config['name'] = self.job_name
# use cluster from test_suite, or job, or not at all
if 'cluster' in test_suite:
prow_config['cluster'] = test_suite['cluster']
elif 'cluster' in self.job:
prow_config['cluster'] = self.job['cluster']
# use resources from test_suite, or job, or default
if 'resources' in test_suite:
prow_config['resources'] = test_suite['resources']
elif 'resources' in self.job:
prow_config['resources'] = self.job['resources']
if 'interval' in self.job:
del prow_config['cron']
prow_config['interval'] = self.job['interval']
elif 'cron' in self.job:
del prow_config['interval']
prow_config['cron'] = self.job['cron']
else:
raise Exception("no interval or cron definition found")
# Assumes that the value in --timeout is in minutes.
timeout = int(next(
x[10:-1] for x in test_suite['args'] if (
x.startswith('--timeout='))))
container = prow_config['spec']['containers'][0]
if not container['args']:
container['args'] = []
container['args'].append('--bare')
# Prow timeout = job timeout + 20min
container['args'].append('--timeout=%d' % (timeout + 20))
return prow_config
def __get_testgrid_config(self):
tg_config = yaml.round_trip_load(E2E_TESTGRID_CONFIG_TEMPLATE)
tg_config['name'] = self.job_name
tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
return tg_config
def initialize_dashboards_with_release_blocking_info(self, version):
dashboards = []
if self.job.get('releaseBlocking'):
dashboards.append('sig-release-%s-blocking' % version)
elif self.job.get('releaseInforming'):
dashboards.append('sig-release-%s-informing' % version)
else:
dashboards.append('sig-release-generated')
return dashboards
def generate(self):
'''Returns the job and the Prow configurations for this test.'''
fields = self.job_name.split('-')
if len(fields) != 7:
raise ValueError('Expected 7 fields in job name', self.job_name)
cloud_provider = self.cloud_providers[fields[3]]
image = self.images[fields[4]]
k8s_version = self.k8s_versions[fields[5][3:]]
test_suite = self.test_suites[fields[6]]
# Generates args.
args = []
args.extend(get_args(self.job_name, self.common))
args.extend(get_args(self.job_name, cloud_provider))
args.extend(get_args(self.job_name, image))
args.extend(get_args(self.job_name, k8s_version))
args.extend(get_args(self.job_name, test_suite))
# Generates job config.
job_config = self.__get_job_def(args)
# Generates Prow config.
prow_config = self.__get_prow_config(test_suite)
tg_config = self.__get_testgrid_config()
annotations = prow_config.setdefault('annotations', {})
tab_name = '%s-%s-%s-%s' % (fields[3], fields[4], fields[5], fields[6])
annotations['testgrid-tab-name'] = tab_name
dashboards = self.initialize_dashboards_with_release_blocking_info(k8s_version['version'])
if image.get('testgrid_prefix') is not None:
dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[4],
fields[5])
dashboards.append(dashboard)
annotations['testgrid-dashboards'] = ', '.join(dashboards)
if 'testgridNumFailuresToAlert' in self.job:
annotations['testgrid-num-failures-to-alert'] = ('%s' %
self.job['testgridNumFailuresToAlert'])
return job_config, prow_config, tg_config
def for_each_job(output_dir, job_name, job, yaml_config):
"""Returns the job config and the Prow config for one test job."""
fields = job_name.split('-')
if len(fields) < 3:
raise ValueError('Expected at least 3 fields in job name', job_name)
job_type = fields[2]
# Generates configurations.
if job_type == 'e2e':
generator = E2ETest(output_dir, job_name, job, yaml_config)
elif job_type == 'e2enode':
generator = E2ENodeTest(job_name, job, yaml_config)
else:
raise ValueError('Unexpected job type ', job_type)
job_config, prow_config, testgrid_config = generator.generate()
# Applies job-level overrides.
apply_job_overrides(job_config['args'], get_args(job_name, job))
# merge job_config into prow_config
args = prow_config['spec']['containers'][0]['args']
args.append('--scenario=' + job_config['scenario'])
args.append('--')
args.extend(job_config['args'])
return prow_config, testgrid_config
def main(yaml_config_path, output_dir, testgrid_output_path):
"""Creates test job definitions.
Converts the test configurations in yaml_config_path to the job definitions
in output_dir/generated.yaml.
"""
# TODO(yguo0905): Validate the configurations from yaml_config_path.
with open(yaml_config_path) as fp:
yaml_config = yaml.safe_load(fp)
output_config = {}
output_config['periodics'] = []
testgrid_config = {'test_groups': []}
for job_name, _ in yaml_config['jobs'].items():
# Get the envs and args for each job defined under "jobs".
prow, testgrid = for_each_job(
output_dir, job_name, yaml_config['jobs'][job_name], yaml_config)
output_config['periodics'].append(prow)
if testgrid is not None:
testgrid_config['test_groups'].append(testgrid)
# Write the job definitions to --output-dir/generated.yaml
write_prow_configs_file(output_dir + 'generated.yaml', output_config)
write_testgrid_config_file(testgrid_output_path, testgrid_config)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Create test definitions from the given yaml config')
PARSER.add_argument('--yaml-config-path', help='Path to config.yaml')
PARSER.add_argument(
'--output-dir',
help='Prowjob config output dir',
default='config/jobs/kubernetes/generated/')
PARSER.add_argument(
'--testgrid-output-path',
help='Path to testgrid output file',
default='config/testgrids/generated-test-config.yaml')
ARGS = PARSER.parse_args()
main(
ARGS.yaml_config_path,
ARGS.output_dir,
ARGS.testgrid_output_path)
|
|
from nsbaseresource import NSBaseResource
__author__ = 'vlazarenko'
class NSCSVServerRewritePolicyBinding(NSBaseResource):
def __init__(self, json_data=None):
"""
Supplied with json_data the object can be pre-filled
"""
super(NSCSVServerRewritePolicyBinding, self).__init__()
self.options = {'policyname': '',
'priority': '',
'gotopriorityexpression': '',
'bindpoint': '',
'invoke': '',
'labeltype': '',
'labelname': '',
'name': ''}
if not (json_data is None):
for key in json_data.keys():
if key in self.options.keys():
self.options[key] = json_data[key]
self.resourcetype = NSCSVServerRewritePolicyBinding.get_resourcetype()
@staticmethod
def get_resourcetype():
return "csvserver_rewritepolicy_binding"
def set_policyname(self, policyname):
"""
Bound classic (Compression/Authorization/Filter/Audit) policies to this vserver.
Default value: 0
"""
self.options['policyname'] = policyname
def get_policyname(self):
"""
Bound classic (Compression/Authorization/Filter/Audit) policies to this vserver.
Default value: 0
"""
return self.options['policyname']
def set_priority(self, priority):
"""
Priority for the policy.
Default value: 0
"""
self.options['priority'] = priority
def get_priority(self):
"""
Priority for the policy.
Default value: 0
"""
return self.options['priority']
def set_gotopriorityexpression(self, gotopriorityexpression):
"""
Expression specifying the priority of the next policy
which will get evaluated if the current policy rule
evaluates to TRUE.
Default value: 0
"""
self.options['gotopriorityexpression'] = gotopriorityexpression
def get_gotopriorityexpression(self):
"""
Expression specifying the priority of the next policy
which will get evaluated if the current policy rule
evaluates to TRUE.
Default value: 0
"""
return self.options['gotopriorityexpression']
def set_bindpoint(self, bindpoint):
"""
The bindpoint to which the policy is bound.
Default value: 0
"""
self.options['bindpoint'] = bindpoint
def get_bindpoint(self):
"""
The bindpoint to which the policy is bound.
Default value: 0
"""
return self.options['bindpoint']
def set_invoke(self, invoke):
"""
Invoke flag.
Default value: 0
"""
self.options['invoke'] = invoke
def get_invoke(self):
"""
Invoke flag.
Default value: 0
"""
return self.options['invoke']
def set_labeltype(self, labeltype):
"""
The invocation type.
Default value: 0
"""
self.options['labeltype'] = labeltype
def get_labeltype(self):
"""
The invocation type.
Default value: 0
"""
return self.options['labeltype']
def set_labelname(self, labelname):
"""
Name of the label invoked.
Default value: 0
"""
self.options['labelname'] = labelname
def get_labelname(self):
"""
Name of the label invoked.
Default value: 0
"""
return self.options['labelname']
def set_name(self, name):
"""
The virtual server name (created with the add cs vserver or
add cr vserver command) for which the content
switching policy will be set.
Default value: 0
Minimum length = 1.
"""
self.options['name'] = name
def get_name(self):
"""
The virtual server name (created with the add cs vserver or
add cr vserver command) for which the content
switching policy will be set.
Default value: 0
Minimum length = 1.
"""
return self.options['name']
# Operations methods
@staticmethod
def get(nitro, csvserver_rewritepolicy_binding):
"""
Use this API to fetch all configured csvserver_rewritepolicy_binding resources.
"""
__url = nitro.get_url() + NSCSVServerRewritePolicyBinding.get_resourcetype() + "/" + csvserver_rewritepolicy_binding.get_name()
__json_resources = nitro.get(__url).get_response_field(NSCSVServerRewritePolicyBinding.get_resourcetype())
__resources = []
for json_resource in __json_resources:
__resources.append(NSCSVServerRewritePolicyBinding(json_resource))
return __resources
@staticmethod
def add(nitro, csvserver_rewritepolicy_binding):
"""
Use this API to add csvserver_rewritepolicy_binding.
"""
__csvserver_rewritepolicy_binding = NSCSVServerRewritePolicyBinding()
__csvserver_rewritepolicy_binding.set_name(csvserver_rewritepolicy_binding.get_name())
__csvserver_rewritepolicy_binding.set_policyname(csvserver_rewritepolicy_binding.get_policyname())
__csvserver_rewritepolicy_binding.set_priority(csvserver_rewritepolicy_binding.get_priority())
__csvserver_rewritepolicy_binding.set_gotopriorityexpression(csvserver_rewritepolicy_binding.get_gotopriorityexpression())
__csvserver_rewritepolicy_binding.set_bindpoint(csvserver_rewritepolicy_binding.get_bindpoint())
__csvserver_rewritepolicy_binding.set_invoke(csvserver_rewritepolicy_binding.get_invoke())
__csvserver_rewritepolicy_binding.set_labeltype(csvserver_rewritepolicy_binding.get_labeltype())
__csvserver_rewritepolicy_binding.set_labelname(csvserver_rewritepolicy_binding.get_labelname())
return __csvserver_rewritepolicy_binding.update_resource(nitro)
@staticmethod
def delete(nitro, csvserver_rewritepolicy_binding):
"""
Use this API to delete csvserver_rewritepolicy_binding of a given name.
"""
__csvserver_rewritepolicy_binding = NSCSVServerRewritePolicyBinding()
__csvserver_rewritepolicy_binding.set_name(csvserver_rewritepolicy_binding.get_name())
__csvserver_rewritepolicy_binding.set_policyname(csvserver_rewritepolicy_binding.get_policyname())
__csvserver_rewritepolicy_binding.set_priority(csvserver_rewritepolicy_binding.get_priority())
__csvserver_rewritepolicy_binding.set_bindpoint(csvserver_rewritepolicy_binding.get_bindpoint())
nsresponse = __csvserver_rewritepolicy_binding.delete_resource(nitro)
return nsresponse
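# --- Hedged usage sketch (not part of the original nsnitro module) ---
# A minimal illustration of driving the operation methods above. It assumes an
# already authenticated nitro session object is supplied by the caller; the
# vserver name, policy name and bindpoint value are hypothetical placeholders.
def example_bind_rewrite_policy(nitro, vserver_name, policy_name, priority):
    """Bind a rewrite policy to a content switching vserver, then list bindings."""
    binding = NSCSVServerRewritePolicyBinding()
    binding.set_name(vserver_name)        # target cs vserver
    binding.set_policyname(policy_name)   # rewrite policy to bind
    binding.set_priority(priority)        # evaluation priority
    binding.set_bindpoint('REQUEST')      # assumed bindpoint value
    NSCSVServerRewritePolicyBinding.add(nitro, binding)
    # Fetch everything currently bound to the same vserver.
    return NSCSVServerRewritePolicyBinding.get(nitro, binding)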
|
|
# -*- coding: utf-8 -*-
"""
servers.py
~~~~~~~~~~~~
This module implements servers HP OneView REST API
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__title__ = 'servers'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from hpOneView.common import *
from hpOneView.connection import *
from hpOneView.activity import *
from hpOneView.exceptions import *
class servers(object):
def __init__(self, con):
self._con = con
self._activity = activity(con)
###########################################################################
# Server Hardware
###########################################################################
def get_server_by_bay(self, baynum):
servers = get_members(self._con.get(uri['servers']))
for server in servers:
if server['position'] == baynum:
return server
def get_server_by_name(self, name):
servers = get_members(self._con.get(uri['servers']))
for server in servers:
if server['shortModel'] == name:
return server
def get_servers(self):
return get_members(self._con.get(uri['servers']))
def get_server_hardware_types(self):
body = self._con.get(uri['server-hardware-types'])
return get_members(body)
def set_server_powerstate(self, server, state, force=False, blocking=True,
verbose=False):
if state == 'Off' and force is True:
powerRequest = make_powerstate_dict('Off', 'PressAndHold')
elif state == 'Off' and force is False:
powerRequest = make_powerstate_dict('Off', 'MomentaryPress')
elif state == 'On':
powerRequest = make_powerstate_dict('On', 'MomentaryPress')
        elif state == 'Reset':
            powerRequest = make_powerstate_dict('On', 'Reset')
        else:
            raise ValueError('Unsupported power state: %s' % state)
        task, body = self._con.put(server['uri'] + '/powerState', powerRequest)
if blocking is True:
task = self._activity.wait4task(task, tout=60, verbose=verbose)
return task
def delete_server(self, server, force=False, blocking=True, verbose=False):
if force:
task, body = self._con.delete(server['uri'] + '?force=True')
else:
task, body = self._con.delete(server['uri'])
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return task
def update_server(self, server):
task, body = self._con.put(server['uri'], server)
return body
def add_server(self, server, blocking=True, verbose=False):
task, body = self._con.post(uri['servers'], server)
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
if 'type' in task and task['type'].startswith('Task'):
entity = self._activity.get_task_associated_resource(task)
server = self._con.get(entity['resourceUri'])
return server
return task
###########################################################################
# Server Profiles
###########################################################################
def create_server_profile(self, profile, blocking=True, verbose=False):
# Creating a profile returns a task with no resource uri
task, body = self._con.post(uri['profiles'], profile)
if profile['firmware'] is None:
tout = 600
else:
tout = 3600
if blocking is True:
task = self._activity.wait4task(task, tout, verbose=verbose)
if 'type' in task and task['type'].startswith('Task'):
entity = self._activity.get_task_associated_resource(task)
profile = self._con.get(entity['resourceUri'])
return profile
return task
def remove_server_profile(self, profile, force=False, blocking=True, verbose=False):
if force:
task, body = self._con.delete(profile['uri'] + '?force=True')
else:
task, body = self._con.delete(profile['uri'])
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return task
def get_server_profiles(self):
body = self._con.get(uri['profiles'])
return get_members(body)
def update_server_profile(self, profile, blocking=True, verbose=False):
task, body = self._con.put(profile['uri'], profile)
try:
if profile['firmware']['firmwareBaselineUri'] is None:
tout = 600
else:
tout = 3600
except Exception:
tout = 600
# Update the task to get the associated resource uri
if blocking is True:
task = self._activity.wait4task(task, tout=tout, verbose=verbose)
profileResource = self._activity.get_task_associated_resource(task)
profile = self._con.get(profileResource['resourceUri'])
return profile
###########################################################################
# Enclosures
###########################################################################
def get_enclosures(self):
body = self._con.get(uri['enclosures'])
return get_members(body)
def add_enclosure(self, enclosure, blocking=True, verbose=False):
task, body = self._con.post(uri['enclosures'], enclosure)
        if enclosure['state'] == 'Monitored':
tout = 600
elif enclosure['firmwareBaselineUri'] is None:
tout = 600
else:
tout = 3600
if blocking is True:
task = self._activity.wait4task(task, tout, verbose=verbose)
if 'type' in task and task['type'].startswith('Task'):
entity = self._activity.get_task_associated_resource(task)
enclosure = self._con.get(entity['resourceUri'])
return enclosure
return task
def remove_enclosure(self, enclosure, force=False, blocking=True,
verbose=False):
if force:
task, body = self._con.delete(enclosure['uri'] + '?force=True')
else:
task, body = self._con.delete(enclosure['uri'])
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return task
###########################################################################
# Enclosure Groups
###########################################################################
def create_enclosure_group(self, egroup):
# Creating an Enclosure Group returns the group, NOT a task
task, body = self._con.post(uri['enclosureGroups'], egroup)
return body
def delete_enclosure_group(self, egroup):
self._con.delete(egroup['uri'])
def get_enclosure_groups(self):
return get_members(self._con.get(uri['enclosureGroups']))
def update_enclosure_group(self, enclosuregroup):
task, body = self._con.put(enclosuregroup['uri'], enclosuregroup)
return body
###########################################################################
# ID Pools
###########################################################################
def get_pool(self, pooltype):
body = self._con.get(uri['idpool'] + '/' + pooltype)
return body
def get_vmac_pool(self):
body = self._con.get(uri['vmac-pool'])
return body
def get_vwwn_pool(self):
body = self._con.get(uri['vwwn-pool'])
return body
def get_vsn_pool(self):
body = self._con.get(uri['vsn-pool'])
return body
def get_profile_networks(self):
body = self._con.get(uri['profile-networks'])
return body
def get_profile_available_servers(self):
body = self._con.get(uri['profile-available-servers'])
return body
def get_profile_available_storage_systems(self):
body = self._con.get(uri['profile-available-storage-systems'])
return body
def get_profile_ports(self):
body = self._con.get(uri['profile-ports'])
return body
# TODO put pool
def allocate_pool_ids(self, url, count):
allocatorUrl = '%s/allocator' % url
allocatorBody = {'count': count}
task, body = self._con.put(allocatorUrl, allocatorBody)
return body
def release_pool_ids(self, url, idList):
collectorUrl = '%s/collector' % url
collectorBody = {'idList': idList}
task, body = self._con.put(collectorUrl, collectorBody)
return body
def allocate_range_ids(self, allocatorUrl, count):
task, body = self._con.put(allocatorUrl, {'count': count})
return body
def release_range_ids(self, collectorUrl, idList):
task, body = self._con.put(collectorUrl, {'idList': idList})
return body
# TODO POST Range
def enable_range(self, url):
prange = self._con.get(url)
prange['enabled'] = True
task, body = self._con.put(url, prange)
return body
def disable_range(self, url):
prange = self._con.get(url)
prange['enabled'] = False
task, body = self._con.put(url, prange)
return body
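# --- Hedged usage sketch (not part of the original hpOneView module) ---
# A minimal illustration of the server hardware helpers above. It assumes
# `con` is an authenticated hpOneView connection created elsewhere; the bay
# number is a hypothetical placeholder.
def example_power_cycle_bay(con, baynum):
    """Force the server in the given bay off, then power it back on."""
    srv = servers(con)
    server = srv.get_server_by_bay(baynum)
    if server is None:
        raise ValueError('No server found in bay %s' % baynum)
    # 'Off' with force=True maps to a PressAndHold request (see set_server_powerstate).
    srv.set_server_powerstate(server, 'Off', force=True, blocking=True)
    return srv.set_server_powerstate(server, 'On', blocking=True)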
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import closing
from itertools import chain
import os
import textwrap
from xml.etree import ElementTree
from twitter.common.collections import OrderedDict
from pants.backend.jvm.jvm_tool_bootstrapper import JvmToolBootstrapper
from pants.backend.jvm.scala.target_platform import TargetPlatform
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnit
from pants.util.contextutil import open_zip as open_jar
from pants.util.dirutil import safe_open
# Well known metadata file required to register scalac plugins with nsc.
_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
class ZincUtils(object):
"""Convenient wrapper around zinc invocations.
Instances are immutable, and all methods are reentrant (assuming that the java_runner is).
"""
class DepLookupError(AddressLookupError):
"""Thrown when a dependency can't be found."""
pass
_ZINC_MAIN = 'com.typesafe.zinc.Main'
def __init__(self, context, nailgun_task, jvm_options, color=True, log_level='info'):
self.context = context
self._nailgun_task = nailgun_task # We run zinc on this task's behalf.
self._jvm_options = jvm_options
self._color = color
self._log_level = log_level
self._jvm_tool_bootstrapper = JvmToolBootstrapper(self.context.products)
# The target scala version.
self._compile_bootstrap_key = 'scalac'
self._compile_bootstrap_tools = TargetPlatform(config=context.config).compiler_specs
self._jvm_tool_bootstrapper.register_jvm_tool(self._compile_bootstrap_key,
self._compile_bootstrap_tools,
ini_section='scala-compile',
ini_key='compile-bootstrap-tools')
# The zinc version (and the scala version it needs, which may differ from the target version).
self._zinc_bootstrap_key = 'zinc'
self._jvm_tool_bootstrapper.register_jvm_tool_from_config(self._zinc_bootstrap_key,
context.config,
ini_section='scala-compile',
ini_key='zinc-bootstrap-tools',
default=['//:zinc'])
# Compiler plugins.
plugins_bootstrap_tools = context.config.getlist('scala-compile',
'scalac-plugin-bootstrap-tools',
default=[])
if plugins_bootstrap_tools:
self._plugins_bootstrap_key = 'plugins'
self._jvm_tool_bootstrapper.register_jvm_tool(self._plugins_bootstrap_key,
plugins_bootstrap_tools,
ini_section='scala-compile',
ini_key='scalac-plugin-bootstrap-tools')
else:
self._plugins_bootstrap_key = None
@property
def _zinc_classpath(self):
return self._jvm_tool_bootstrapper.get_jvm_tool_classpath(self._zinc_bootstrap_key)
@property
def _compiler_classpath(self):
return self._jvm_tool_bootstrapper.get_jvm_tool_classpath(self._compile_bootstrap_key)
@property
def _plugin_jars(self):
if self._plugins_bootstrap_key:
return self._jvm_tool_bootstrapper.get_jvm_tool_classpath(self._plugins_bootstrap_key)
else:
return []
@property
def _zinc_jar_args(self):
zinc_jars = ZincUtils.identify_zinc_jars(self._zinc_classpath)
# The zinc jar names are also the flag names.
return (list(chain.from_iterable([['-%s' % name, jarpath]
for (name, jarpath) in zinc_jars.items()])) +
['-scala-path', ':'.join(self._compiler_classpath)])
def _plugin_args(self):
# Allow multiple flags and also comma-separated values in a single flag.
plugin_names = [p for val in self._nailgun_task.get_options().plugins for p in val.split(',')]
plugin_args = self.context.config.getdict('compile.scala', 'plugin-args', default={})
active_plugins = self.find_plugins(plugin_names)
ret = []
for name, jar in active_plugins.items():
ret.append('-S-Xplugin:%s' % jar)
for arg in plugin_args.get(name, []):
ret.append('-S-P:%s:%s' % (name, arg))
return ret
def plugin_jars(self):
"""The jars containing code for enabled plugins."""
return self._plugin_jars
def _run_zinc(self, args, workunit_name='zinc', workunit_labels=None):
zinc_args = [
'-log-level', self._log_level,
]
if not self._color:
zinc_args.append('-no-color')
zinc_args.extend(self._zinc_jar_args)
zinc_args.extend(args)
return self._nailgun_task.runjava(classpath=self._zinc_classpath,
main=ZincUtils._ZINC_MAIN,
jvm_options=self._jvm_options,
args=zinc_args,
workunit_name=workunit_name,
workunit_labels=workunit_labels)
def platform_version_info(self):
ret = []
# Go through all the bootstrap tools required to compile.
for target in self._compile_bootstrap_tools:
# Resolve to their actual targets.
try:
deps = self.context.resolve(target)
except AddressLookupError as e:
raise self.DepLookupError("{message}\n referenced from [{section}] key: {key} in pants.ini"
.format(message=e, section='scala-compile',
key='compile-bootstrap-tools'))
for lib in (t for t in deps if isinstance(t, JarLibrary)):
for jar in lib.jar_dependencies:
ret.append(jar.cache_key())
return sorted(ret)
def compile(self, opts, classpath, sources, output_dir, analysis_file, upstream_analysis_files):
args = list(opts) # Make a copy
args.extend(self._plugin_args())
if upstream_analysis_files:
args.extend(
['-analysis-map', ','.join(['%s:%s' % kv for kv in upstream_analysis_files.items()])])
args.extend([
'-analysis-cache', analysis_file,
# We add compiler_classpath to ensure the scala-library jar is on the classpath.
# TODO: This also adds the compiler jar to the classpath, which compiled code shouldn't
# usually need. Be more selective?
'-classpath', ':'.join(self._compiler_classpath + classpath),
'-d', output_dir
])
args.extend(sources)
self.log_zinc_file(analysis_file)
if self._run_zinc(args, workunit_labels=[WorkUnit.COMPILER]):
raise TaskError('Zinc compile failed.')
@staticmethod
def write_plugin_info(resources_dir, target):
root = os.path.join(resources_dir, target.id)
plugin_info_file = os.path.join(root, _PLUGIN_INFO_FILE)
with safe_open(plugin_info_file, 'w') as f:
f.write(textwrap.dedent('''
<plugin>
<name>%s</name>
<classname>%s</classname>
</plugin>
''' % (target.plugin, target.classname)).strip())
return root, plugin_info_file
# These are the names of the various jars zinc needs. They are, conveniently and
# non-coincidentally, the names of the flags used to pass the jar locations to zinc.
ZINC_JAR_NAMES = ['compiler-interface', 'sbt-interface']
@staticmethod
def identify_zinc_jars(zinc_classpath):
"""Find the named jars in the zinc classpath.
TODO: Make these mappings explicit instead of deriving them by jar name heuristics.
"""
ret = OrderedDict()
ret.update(ZincUtils.identify_jars(ZincUtils.ZINC_JAR_NAMES, zinc_classpath))
return ret
@staticmethod
def identify_jars(names, jars):
jars_by_name = {}
jars_and_filenames = [(x, os.path.basename(x)) for x in jars]
for name in names:
jar_for_name = None
for jar, filename in jars_and_filenames:
if filename.startswith(name):
jar_for_name = jar
break
if jar_for_name is None:
raise TaskError('Couldn\'t find jar named %s' % name)
else:
jars_by_name[name] = jar_for_name
return jars_by_name
def find_plugins(self, plugin_names):
"""Returns a map from plugin name to plugin jar."""
plugin_names = set(plugin_names)
plugins = {}
buildroot = get_buildroot()
# plugin_jars is the universe of all possible plugins and their transitive deps.
# Here we select the ones to actually use.
for jar in self.plugin_jars():
with open_jar(jar, 'r') as jarfile:
try:
with closing(jarfile.open(_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
plugin_info = ElementTree.parse(plugin_info_file).getroot()
if plugin_info.tag != 'plugin':
raise TaskError(
'File %s in %s is not a valid scalac plugin descriptor' % (_PLUGIN_INFO_FILE, jar))
name = plugin_info.find('name').text
if name in plugin_names:
if name in plugins:
raise TaskError('Plugin %s defined in %s and in %s' % (name, plugins[name], jar))
# It's important to use relative paths, as the compiler flags get embedded in the zinc
# analysis file, and we port those between systems via the artifact cache.
plugins[name] = os.path.relpath(jar, buildroot)
except KeyError:
pass
unresolved_plugins = plugin_names - set(plugins.keys())
if unresolved_plugins:
raise TaskError('Could not find requested plugins: %s' % list(unresolved_plugins))
return plugins
def log_zinc_file(self, analysis_file):
self.context.log.debug('Calling zinc on: %s (%s)' %
(analysis_file,
hash_file(analysis_file).upper()
if os.path.exists(analysis_file)
else 'nonexistent'))
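# --- Hedged usage sketch (not part of the original pants module) ---
# identify_jars() is a pure helper, so it can be exercised without a zinc or
# pants context (the module-level imports above still apply). The jar paths
# below are hypothetical examples only.
if __name__ == '__main__':
  _example_classpath = [
    '/tmp/ivy/compiler-interface-0.13.5.jar',
    '/tmp/ivy/sbt-interface-0.13.5.jar',
  ]
  # Maps each requested name prefix to the first matching jar on the classpath.
  print(ZincUtils.identify_jars(ZincUtils.ZINC_JAR_NAMES, _example_classpath))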
|
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import product
from unittest import mock
import warnings
import pytest
from neo4j import (
Address,
Record,
Result,
ResultSummary,
ServerInfo,
SummaryCounters,
Version,
)
from neo4j._async_compat.util import Util
from neo4j.data import DataHydrator
from neo4j.exceptions import (
ResultConsumedError,
ResultNotSingleError,
)
from ...._async_compat import mark_sync_test
class Records:
def __init__(self, fields, records):
assert all(len(fields) == len(r) for r in records)
self.fields = fields
# self.records = [{"record_values": r} for r in records]
self.records = records
def __len__(self):
return self.records.__len__()
def __iter__(self):
return self.records.__iter__()
def __getitem__(self, item):
return self.records.__getitem__(item)
class ConnectionStub:
class Message:
def __init__(self, message, *args, **kwargs):
self.message = message
self.args = args
self.kwargs = kwargs
def _cb(self, cb_name, *args, **kwargs):
# print(self.message, cb_name.upper(), args, kwargs)
cb = self.kwargs.get(cb_name)
Util.callback(cb, *args, **kwargs)
def on_success(self, metadata):
self._cb("on_success", metadata)
def on_summary(self):
self._cb("on_summary")
def on_records(self, records):
self._cb("on_records", records)
def __eq__(self, other):
return self.message == other
def __repr__(self):
return "Message(%s)" % self.message
def __init__(self, records=None, run_meta=None, summary_meta=None,
force_qid=False):
self._multi_result = isinstance(records, (list, tuple))
if self._multi_result:
self._records = records
self._use_qid = True
else:
self._records = records,
self._use_qid = force_qid
self.fetch_idx = 0
self._qid = -1
self.most_recent_qid = None
self.record_idxs = [0] * len(self._records)
self.to_pull = [None] * len(self._records)
self._exhausted = [False] * len(self._records)
self.queued = []
self.sent = []
self.run_meta = run_meta
self.summary_meta = summary_meta
ConnectionStub.server_info.update({"server": "Neo4j/4.3.0"})
self.unresolved_address = None
def send_all(self):
self.sent += self.queued
self.queued = []
def fetch_message(self):
if self.fetch_idx >= len(self.sent):
pytest.fail("Waits for reply to never sent message")
msg = self.sent[self.fetch_idx]
if msg == "RUN":
self.fetch_idx += 1
self._qid += 1
meta = {"fields": self._records[self._qid].fields,
**(self.run_meta or {})}
if self._use_qid:
meta.update(qid=self._qid)
msg.on_success(meta)
elif msg == "DISCARD":
self.fetch_idx += 1
qid = msg.kwargs.get("qid", -1)
if qid < 0:
qid = self._qid
self.record_idxs[qid] = len(self._records[qid])
msg.on_success(self.summary_meta or {})
msg.on_summary()
elif msg == "PULL":
qid = msg.kwargs.get("qid", -1)
if qid < 0:
qid = self._qid
if self._exhausted[qid]:
pytest.fail("PULLing exhausted result")
if self.to_pull[qid] is None:
n = msg.kwargs.get("n", -1)
if n < 0:
n = len(self._records[qid])
self.to_pull[qid] = \
min(n, len(self._records[qid]) - self.record_idxs[qid])
# if to == len(self._records):
# self.fetch_idx += 1
if self.to_pull[qid] > 0:
record = self._records[qid][self.record_idxs[qid]]
self.record_idxs[qid] += 1
self.to_pull[qid] -= 1
msg.on_records([record])
elif self.to_pull[qid] == 0:
self.to_pull[qid] = None
self.fetch_idx += 1
if self.record_idxs[qid] < len(self._records[qid]):
msg.on_success({"has_more": True})
else:
msg.on_success(
{"bookmark": "foo", **(self.summary_meta or {})}
)
self._exhausted[qid] = True
msg.on_summary()
def fetch_all(self):
while self.fetch_idx < len(self.sent):
self.fetch_message()
def run(self, *args, **kwargs):
self.queued.append(ConnectionStub.Message("RUN", *args, **kwargs))
def discard(self, *args, **kwargs):
self.queued.append(ConnectionStub.Message("DISCARD", *args, **kwargs))
def pull(self, *args, **kwargs):
self.queued.append(ConnectionStub.Message("PULL", *args, **kwargs))
server_info = ServerInfo(Address(("bolt://localhost", 7687)), Version(4, 3))
def defunct(self):
return False
class HydratorStub(DataHydrator):
def hydrate(self, values):
return values
def noop(*_, **__):
pass
def fetch_and_compare_all_records(
result, key, expected_records, method, limit=None
):
received_records = []
if method == "for loop":
for record in result:
assert isinstance(record, Record)
received_records.append([record.data().get(key, None)])
if limit is not None and len(received_records) == limit:
break
if limit is None:
assert result._exhausted
elif method == "next":
n = len(expected_records) if limit is None else limit
for _ in range(n):
record = Util.next(result)
received_records.append([record.get(key, None)])
if limit is None:
with pytest.raises(StopIteration):
Util.next(result)
assert result._exhausted
elif method == "one iter":
iter_ = Util.iter(result)
n = len(expected_records) if limit is None else limit
for _ in range(n):
record = Util.next(iter_)
received_records.append([record.get(key, None)])
if limit is None:
with pytest.raises(StopIteration):
Util.next(iter_)
assert result._exhausted
elif method == "new iter":
n = len(expected_records) if limit is None else limit
for _ in range(n):
iter_ = Util.iter(result)
record = Util.next(iter_)
received_records.append([record.get(key, None)])
if limit is None:
iter_ = Util.iter(result)
with pytest.raises(StopIteration):
Util.next(iter_)
assert result._exhausted
else:
raise ValueError()
assert received_records == expected_records
@pytest.mark.parametrize("method",
("for loop", "next", "one iter", "new iter"))
@pytest.mark.parametrize("records", (
[],
[[42]],
[[1], [2], [3], [4], [5]],
))
@mark_sync_test
def test_result_iteration(method, records):
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), 2, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
fetch_and_compare_all_records(result, "x", records, method)
@pytest.mark.parametrize("method",
("for loop", "next", "one iter", "new iter"))
@pytest.mark.parametrize("invert_fetch", (True, False))
@mark_sync_test
def test_parallel_result_iteration(method, invert_fetch):
records1 = [[i] for i in range(1, 6)]
records2 = [[i] for i in range(6, 11)]
connection = ConnectionStub(
records=(Records(["x"], records1), Records(["x"], records2))
)
result1 = Result(connection, HydratorStub(), 2, noop, noop)
result1._run("CYPHER1", {}, None, None, "r", None)
result2 = Result(connection, HydratorStub(), 2, noop, noop)
result2._run("CYPHER2", {}, None, None, "r", None)
if invert_fetch:
fetch_and_compare_all_records(
result2, "x", records2, method
)
fetch_and_compare_all_records(
result1, "x", records1, method
)
else:
fetch_and_compare_all_records(
result1, "x", records1, method
)
fetch_and_compare_all_records(
result2, "x", records2, method
)
@pytest.mark.parametrize("method",
("for loop", "next", "one iter", "new iter"))
@pytest.mark.parametrize("invert_fetch", (True, False))
@mark_sync_test
def test_interwoven_result_iteration(method, invert_fetch):
records1 = [[i] for i in range(1, 10)]
records2 = [[i] for i in range(11, 20)]
connection = ConnectionStub(
records=(Records(["x"], records1), Records(["y"], records2))
)
result1 = Result(connection, HydratorStub(), 2, noop, noop)
result1._run("CYPHER1", {}, None, None, "r", None)
result2 = Result(connection, HydratorStub(), 2, noop, noop)
result2._run("CYPHER2", {}, None, None, "r", None)
start = 0
for n in (1, 2, 3, 1, None):
end = n if n is None else start + n
if invert_fetch:
fetch_and_compare_all_records(
result2, "y", records2[start:end], method, n
)
fetch_and_compare_all_records(
result1, "x", records1[start:end], method, n
)
else:
fetch_and_compare_all_records(
result1, "x", records1[start:end], method, n
)
fetch_and_compare_all_records(
result2, "y", records2[start:end], method, n
)
start = end
@pytest.mark.parametrize("records", ([[1], [2]], [[1]], []))
@pytest.mark.parametrize("fetch_size", (1, 2))
@mark_sync_test
def test_result_peek(records, fetch_size):
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), fetch_size, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
for i in range(len(records) + 1):
record = result.peek()
if i == len(records):
assert record is None
else:
assert isinstance(record, Record)
assert record.get("x") == records[i][0]
iter_ = Util.iter(result)
Util.next(iter_) # consume the record
@pytest.mark.parametrize("records", ([[1], [2]], [[1]], []))
@pytest.mark.parametrize("fetch_size", (1, 2))
@pytest.mark.parametrize("default", (True, False))
@mark_sync_test
def test_result_single_non_strict(records, fetch_size, default):
kwargs = {}
if not default:
kwargs["strict"] = False
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), fetch_size, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
if len(records) == 0:
assert result.single(**kwargs) is None
else:
if len(records) == 1:
record = result.single(**kwargs)
else:
with pytest.warns(Warning, match="multiple"):
record = result.single(**kwargs)
assert isinstance(record, Record)
assert record.get("x") == records[0][0]
@pytest.mark.parametrize("records", ([[1], [2]], [[1]], []))
@pytest.mark.parametrize("fetch_size", (1, 2))
@mark_sync_test
def test_result_single_strict(records, fetch_size):
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), fetch_size, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
try:
record = result.single(strict=True)
except ResultNotSingleError as exc:
assert len(records) != 1
if len(records) == 0:
assert exc is not None
assert "no records" in str(exc).lower()
elif len(records) > 1:
assert exc is not None
assert "more than one record" in str(exc).lower()
else:
assert len(records) == 1
assert isinstance(record, Record)
assert record.get("x") == records[0][0]
@pytest.mark.parametrize("records", (
[[1], [2], [3]], [[1]], [], [[i] for i in range(100)]
))
@pytest.mark.parametrize("fetch_size", (1, 2))
@pytest.mark.parametrize("strict", (True, False))
@mark_sync_test
def test_result_single_exhausts_records(records, fetch_size, strict):
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), fetch_size, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result.single(strict=strict)
except ResultNotSingleError:
pass
assert not result.closed() # close has nothing to do with being exhausted
assert [r for r in result] == []
assert not result.closed()
@pytest.mark.parametrize("records", (
[[1], [2], [3]], [[1]], [], [[i] for i in range(100)]
))
@pytest.mark.parametrize("fetch_size", (1, 2))
@pytest.mark.parametrize("strict", (True, False))
@mark_sync_test
def test_result_fetch(records, fetch_size, strict):
connection = ConnectionStub(records=Records(["x"], records))
result = Result(connection, HydratorStub(), fetch_size, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
assert result.fetch(0) == []
assert result.fetch(-1) == []
assert [[r.get("x")] for r in result.fetch(2)] == records[:2]
assert [[r.get("x")] for r in result.fetch(1)] == records[2:3]
assert [[r.get("x")] for r in result] == records[3:]
@mark_sync_test
def test_keys_are_available_before_and_after_stream():
connection = ConnectionStub(records=Records(["x"], [[1], [2]]))
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
assert list(result.keys()) == ["x"]
Util.list(result)
assert list(result.keys()) == ["x"]
@pytest.mark.parametrize("records", ([[1], [2]], [[1]], []))
@pytest.mark.parametrize("consume_one", (True, False))
@pytest.mark.parametrize("summary_meta", (None, {"database": "foobar"}))
@pytest.mark.parametrize("consume_times", (1, 2))
@mark_sync_test
def test_consume(records, consume_one, summary_meta, consume_times):
connection = ConnectionStub(
records=Records(["x"], records), summary_meta=summary_meta
)
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
if consume_one:
try:
Util.next(Util.iter(result))
except StopIteration:
pass
for _ in range(consume_times):
summary = result.consume()
assert isinstance(summary, ResultSummary)
if summary_meta and "db" in summary_meta:
assert summary.database == summary_meta["db"]
else:
assert summary.database is None
server_info = summary.server
assert isinstance(server_info, ServerInfo)
assert server_info.protocol_version == Version(4, 3)
assert isinstance(summary.counters, SummaryCounters)
@pytest.mark.parametrize("t_first", (None, 0, 1, 123456789))
@pytest.mark.parametrize("t_last", (None, 0, 1, 123456789))
@mark_sync_test
def test_time_in_summary(t_first, t_last):
run_meta = None
if t_first is not None:
run_meta = {"t_first": t_first}
summary_meta = None
if t_last is not None:
summary_meta = {"t_last": t_last}
connection = ConnectionStub(
records=Records(["n"], [[i] for i in range(100)]), run_meta=run_meta,
summary_meta=summary_meta
)
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
summary = result.consume()
if t_first is not None:
assert isinstance(summary.result_available_after, int)
assert summary.result_available_after == t_first
else:
assert summary.result_available_after is None
if t_last is not None:
assert isinstance(summary.result_consumed_after, int)
assert summary.result_consumed_after == t_last
else:
assert summary.result_consumed_after is None
assert not hasattr(summary, "t_first")
assert not hasattr(summary, "t_last")
@mark_sync_test
def test_counts_in_summary():
connection = ConnectionStub(records=Records(["n"], [[1], [2]]))
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
summary = result.consume()
assert isinstance(summary.counters, SummaryCounters)
@pytest.mark.parametrize("query_type", ("r", "w", "rw", "s"))
@mark_sync_test
def test_query_type(query_type):
connection = ConnectionStub(
records=Records(["n"], [[1], [2]]), summary_meta={"type": query_type}
)
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
summary = result.consume()
assert isinstance(summary.query_type, str)
assert summary.query_type == query_type
@pytest.mark.parametrize("num_records", range(0, 5))
@mark_sync_test
def test_data(num_records):
connection = ConnectionStub(
records=Records(["n"], [[i + 1] for i in range(num_records)])
)
result = Result(connection, HydratorStub(), 1, noop, noop)
result._run("CYPHER", {}, None, None, "r", None)
result._buffer_all()
records = result._record_buffer.copy()
assert len(records) == num_records
expected_data = []
for i, record in enumerate(records):
record.data = mock.Mock()
expected_data.append("magic_return_%s" % i)
record.data.return_value = expected_data[-1]
assert result.data("hello", "world") == expected_data
for record in records:
        record.data.assert_called_once_with("hello", "world")
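# --- Hedged usage sketch (not part of the original test module) ---
# The same stub wiring the tests above rely on, condensed into one plain
# function so the ConnectionStub/Result interaction can be read in isolation.
def example_stubbed_result():
    connection = ConnectionStub(records=Records(["x"], [[1], [2], [3]]))
    result = Result(connection, HydratorStub(), 2, noop, noop)
    result._run("CYPHER", {}, None, None, "r", None)
    # Drain the stubbed records exactly as test_result_iteration does.
    return [record.get("x") for record in result]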
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-12-12 14:18
from __future__ import unicode_literals
import db.deletion
from django.conf import settings
from django.db import migrations, models
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0010_auto_20161212_1418_c'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='tag',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='tag',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='tag',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_tag', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='tag',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_tag_version', to='share.TagVersion'),
),
migrations.AlterField(
model_name='tagversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_tagversion', to='share.Change'),
),
migrations.AlterField(
model_name='tagversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='tagversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='tagversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='tagversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='award',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.Award'),
),
migrations.AlterField(
model_name='throughawards',
name='award_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AwardVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughawards', to='share.Change'),
),
migrations.AlterField(
model_name='throughawards',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughawards',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='funder',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughawards',
name='funder_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwards'),
),
migrations.AlterField(
model_name='throughawards',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughawards',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughawards', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughawards',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughawards_version', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='award',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Award'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='award_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AwardVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughawardsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='funder',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='funder_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwards'),
),
migrations.AlterField(
model_name='throughawardsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughAwardsVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughcontributor', to='share.Change'),
),
migrations.AlterField(
model_name='throughcontributor',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughcontributor',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='related',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributor',
name='related_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributor'),
),
migrations.AlterField(
model_name='throughcontributor',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughcontributor', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughcontributor',
name='subject',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributor',
name='subject_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributor',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughcontributor_version', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughcontributorversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='related',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='related_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributor'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughContributorVersion'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='subject',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelation'),
),
migrations.AlterField(
model_name='throughcontributorversion',
name='subject_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractAgentWorkRelationVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughsubjects', to='share.Change'),
),
migrations.AlterField(
model_name='throughsubjects',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='subject_relations', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughsubjects',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughsubjects',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjects'),
),
migrations.AlterField(
model_name='throughsubjects',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughsubjects',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughsubjects', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughsubjects',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughsubjects_version', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughsubjectsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjects'),
),
migrations.AlterField(
model_name='throughsubjectsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughSubjectsVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughtags', to='share.Change'),
),
migrations.AlterField(
model_name='throughtags',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='tag_relations', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughtags',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughtags',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTags'),
),
migrations.AlterField(
model_name='throughtags',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_throughtags', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='throughtags',
name='tag',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='work_relations', to='share.Tag'),
),
migrations.AlterField(
model_name='throughtags',
name='tag_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='throughtags',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_throughtags_version', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_throughtagsversion', to='share.Change'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTags'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ThroughTagsVersion'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='tag',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.Tag'),
),
migrations.AlterField(
model_name='throughtagsversion',
name='tag_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.TagVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_workidentifier', to='share.Change'),
),
migrations.AlterField(
model_name='workidentifier',
name='creative_work',
field=models.ForeignKey(on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='identifiers', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='workidentifier',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='extra',
field=models.OneToOneField(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='workidentifier',
name='extra_version',
field=models.OneToOneField(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifier'),
),
migrations.AlterField(
model_name='workidentifier',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifierVersion'),
),
migrations.AlterField(
model_name='workidentifier',
name='sources',
field=share.models.fields.TypedManyToManyField(editable=False, related_name='source_workidentifier', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='workidentifier',
name='version',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='share_workidentifier_version', to='share.WorkIdentifierVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='change',
field=models.OneToOneField(editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='affected_workidentifierversion', to='share.Change'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='creative_work',
field=models.ForeignKey(db_index=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWork'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='creative_work_version',
field=models.ForeignKey(db_index=False, editable=False, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.AbstractCreativeWorkVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='extra',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), to='share.ExtraData'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='extra_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.ExtraDataVersion'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='same_as',
field=models.ForeignKey(null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifier'),
),
migrations.AlterField(
model_name='workidentifierversion',
name='same_as_version',
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=db.deletion.DatabaseOnDelete(clause='CASCADE'), related_name='+', to='share.WorkIdentifierVersion'),
),
]
|
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""Base class for Telegram InputMedia Objects."""
from typing import Union, List, Tuple
from telegram import (
Animation,
Audio,
Document,
InputFile,
PhotoSize,
TelegramObject,
Video,
MessageEntity,
)
from telegram.utils.helpers import DEFAULT_NONE, parse_file_input
from telegram.utils.types import FileInput, JSONDict, ODVInput
class InputMedia(TelegramObject):
"""Base class for Telegram InputMedia Objects.
See :class:`telegram.InputMediaAnimation`, :class:`telegram.InputMediaAudio`,
:class:`telegram.InputMediaDocument`, :class:`telegram.InputMediaPhoto` and
:class:`telegram.InputMediaVideo` for detailed use.
"""
__slots__ = ()
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...], None] = None
def to_dict(self) -> JSONDict:
"""See :meth:`telegram.TelegramObject.to_dict`."""
data = super().to_dict()
if self.caption_entities:
data['caption_entities'] = [
ce.to_dict() for ce in self.caption_entities # pylint: disable=E1133
]
return data
class InputMediaAnimation(InputMedia):
"""Represents an animation file (GIF or H.264/MPEG-4 AVC video without sound) to be sent.
Note:
When using a :class:`telegram.Animation` for the :attr:`media` attribute, it will take the
width, height and duration from that animation, unless otherwise specified with the optional
arguments.
Args:
media (:obj:`str` | `filelike object` | :obj:`bytes` | :class:`pathlib.Path` | \
:class:`telegram.Animation`): File to send. Pass a
file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP
URL for Telegram to get a file from the Internet. Lastly you can pass an existing
:class:`telegram.Animation` object to send.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
filename (:obj:`str`, optional): Custom file name for the animation, when uploading a
new file. Convenience parameter, useful e.g. when sending files generated by the
:obj:`tempfile` module.
.. versionadded:: 13.1
thumb (`filelike object` | :obj:`bytes` | :class:`pathlib.Path`, optional): Thumbnail of
the file sent; can be ignored if
thumbnail generation for the file is supported server-side. The thumbnail should be
in JPEG format and less than 200 kB in size. A thumbnail's width and height should
not exceed 320. Ignored if the file is not uploaded using multipart/form-data.
Thumbnails can't be reused and can be only uploaded as a new file.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
caption (:obj:`str`, optional): Caption of the animation to be sent, 0-1024 characters
after entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of parse_mode.
width (:obj:`int`, optional): Animation width.
height (:obj:`int`, optional): Animation height.
duration (:obj:`int`, optional): Animation duration.
Attributes:
type (:obj:`str`): ``animation``.
media (:obj:`str` | :class:`telegram.InputFile`): Animation to send.
caption (:obj:`str`): Optional. Caption of the animation to be sent.
parse_mode (:obj:`str`): Optional. The parse mode to use for text formatting.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption.
thumb (:class:`telegram.InputFile`): Optional. Thumbnail of the file to send.
width (:obj:`int`): Optional. Animation width.
height (:obj:`int`): Optional. Animation height.
duration (:obj:`int`): Optional. Animation duration.
"""
__slots__ = (
'caption_entities',
'width',
'media',
'thumb',
'caption',
'duration',
'parse_mode',
'height',
'type',
)
def __init__(
self,
media: Union[FileInput, Animation],
thumb: FileInput = None,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
width: int = None,
height: int = None,
duration: int = None,
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...]] = None,
filename: str = None,
):
self.type = 'animation'
if isinstance(media, Animation):
self.media: Union[str, InputFile] = media.file_id
self.width = media.width
self.height = media.height
self.duration = media.duration
else:
self.media = parse_file_input(media, attach=True, filename=filename)
if thumb:
self.thumb = parse_file_input(thumb, attach=True)
if caption:
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
if width:
self.width = width
if height:
self.height = height
if duration:
self.duration = duration
class InputMediaPhoto(InputMedia):
"""Represents a photo to be sent.
Args:
media (:obj:`str` | `filelike object` | :obj:`bytes` | :class:`pathlib.Path` | \
:class:`telegram.PhotoSize`): File to send. Pass a
file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP
URL for Telegram to get a file from the Internet. Lastly you can pass an existing
:class:`telegram.PhotoSize` object to send.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
filename (:obj:`str`, optional): Custom file name for the photo, when uploading a
new file. Convenience parameter, useful e.g. when sending files generated by the
:obj:`tempfile` module.
.. versionadded:: 13.1
caption (:obj:`str`, optional): Caption of the photo to be sent, 0-1024 characters after
entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of parse_mode.
Attributes:
type (:obj:`str`): ``photo``.
media (:obj:`str` | :class:`telegram.InputFile`): Photo to send.
caption (:obj:`str`): Optional. Caption of the photo to be sent.
parse_mode (:obj:`str`): Optional. The parse mode to use for text formatting.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption.
"""
__slots__ = ('caption_entities', 'media', 'caption', 'parse_mode', 'type')
def __init__(
self,
media: Union[FileInput, PhotoSize],
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...]] = None,
filename: str = None,
):
self.type = 'photo'
self.media = parse_file_input(media, PhotoSize, attach=True, filename=filename)
if caption:
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
class InputMediaVideo(InputMedia):
"""Represents a video to be sent.
Note:
* When using a :class:`telegram.Video` for the :attr:`media` attribute, it will take the
width, height and duration from that video, unless otherwise specified with the optional
arguments.
* ``thumb`` will be ignored for small video files, for which Telegram can easily
generate thumbnails. However, this behaviour is undocumented and might be changed
by Telegram.
Args:
media (:obj:`str` | `filelike object` | :obj:`bytes` | :class:`pathlib.Path` | \
:class:`telegram.Video`): File to send. Pass a
file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP
URL for Telegram to get a file from the Internet. Lastly you can pass an existing
:class:`telegram.Video` object to send.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
filename (:obj:`str`, optional): Custom file name for the video, when uploading a
new file. Convenience parameter, useful e.g. when sending files generated by the
:obj:`tempfile` module.
.. versionadded:: 13.1
caption (:obj:`str`, optional): Caption of the video to be sent, 0-1024 characters after
entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of parse_mode.
width (:obj:`int`, optional): Video width.
height (:obj:`int`, optional): Video height.
duration (:obj:`int`, optional): Video duration.
supports_streaming (:obj:`bool`, optional): Pass :obj:`True`, if the uploaded video is
suitable for streaming.
thumb (`filelike object` | :obj:`bytes` | :class:`pathlib.Path`, optional): Thumbnail of
the file sent; can be ignored if
thumbnail generation for the file is supported server-side. The thumbnail should be
in JPEG format and less than 200 kB in size. A thumbnail's width and height should
not exceed 320. Ignored if the file is not uploaded using multipart/form-data.
Thumbnails can't be reused and can be only uploaded as a new file.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
Attributes:
type (:obj:`str`): ``video``.
media (:obj:`str` | :class:`telegram.InputFile`): Video file to send.
caption (:obj:`str`): Optional. Caption of the video to be sent.
parse_mode (:obj:`str`): Optional. The parse mode to use for text formatting.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption.
width (:obj:`int`): Optional. Video width.
height (:obj:`int`): Optional. Video height.
duration (:obj:`int`): Optional. Video duration.
supports_streaming (:obj:`bool`): Optional. Pass :obj:`True`, if the uploaded video is
suitable for streaming.
thumb (:class:`telegram.InputFile`): Optional. Thumbnail of the file to send.
"""
__slots__ = (
'caption_entities',
'width',
'media',
'thumb',
'supports_streaming',
'caption',
'duration',
'parse_mode',
'height',
'type',
)
def __init__(
self,
media: Union[FileInput, Video],
caption: str = None,
width: int = None,
height: int = None,
duration: int = None,
supports_streaming: bool = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
thumb: FileInput = None,
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...]] = None,
filename: str = None,
):
self.type = 'video'
if isinstance(media, Video):
self.media: Union[str, InputFile] = media.file_id
self.width = media.width
self.height = media.height
self.duration = media.duration
else:
self.media = parse_file_input(media, attach=True, filename=filename)
if thumb:
self.thumb = parse_file_input(thumb, attach=True)
if caption:
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
if width:
self.width = width
if height:
self.height = height
if duration:
self.duration = duration
if supports_streaming:
self.supports_streaming = supports_streaming
class InputMediaAudio(InputMedia):
"""Represents an audio file to be treated as music to be sent.
Note:
When using a :class:`telegram.Audio` for the :attr:`media` attribute, it will take the
duration, performer and title from that audio, unless otherwise specified with the
optional arguments.
Args:
media (:obj:`str` | `filelike object` | :obj:`bytes` | :class:`pathlib.Path` | \
:class:`telegram.Audio`):
File to send. Pass a
file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP
URL for Telegram to get a file from the Internet. Lastly you can pass an existing
:class:`telegram.Audio` object to send.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
filename (:obj:`str`, optional): Custom file name for the audio, when uploading a
new file. Convenience parameter, useful e.g. when sending files generated by the
:obj:`tempfile` module.
.. versionadded:: 13.1
caption (:obj:`str`, optional): Caption of the audio to be sent, 0-1024 characters after
entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of parse_mode.
duration (:obj:`int`, optional): Duration of the audio in seconds as defined by sender.
performer (:obj:`str`, optional): Performer of the audio as defined by sender or by audio
tags.
title (:obj:`str`, optional): Title of the audio as defined by sender or by audio tags.
thumb (`filelike object` | :obj:`bytes` | :class:`pathlib.Path`, optional): Thumbnail of
the file sent; can be ignored if
thumbnail generation for the file is supported server-side. The thumbnail should be
in JPEG format and less than 200 kB in size. A thumbnail's width and height should
not exceed 320. Ignored if the file is not uploaded using multipart/form-data.
Thumbnails can't be reused and can be only uploaded as a new file.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
Attributes:
type (:obj:`str`): ``audio``.
media (:obj:`str` | :class:`telegram.InputFile`): Audio file to send.
caption (:obj:`str`): Optional. Caption of the audio to be sent.
parse_mode (:obj:`str`): Optional. The parse mode to use for text formatting.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption.
duration (:obj:`int`): Duration of the audio in seconds.
performer (:obj:`str`): Optional. Performer of the audio as defined by sender or by audio
tags.
title (:obj:`str`): Optional. Title of the audio as defined by sender or by audio tags.
thumb (:class:`telegram.InputFile`): Optional. Thumbnail of the file to send.
"""
__slots__ = (
'caption_entities',
'media',
'thumb',
'caption',
'title',
'duration',
'type',
'parse_mode',
'performer',
)
def __init__(
self,
media: Union[FileInput, Audio],
thumb: FileInput = None,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
duration: int = None,
performer: str = None,
title: str = None,
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...]] = None,
filename: str = None,
):
self.type = 'audio'
if isinstance(media, Audio):
self.media: Union[str, InputFile] = media.file_id
self.duration = media.duration
self.performer = media.performer
self.title = media.title
else:
self.media = parse_file_input(media, attach=True, filename=filename)
if thumb:
self.thumb = parse_file_input(thumb, attach=True)
if caption:
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
if duration:
self.duration = duration
if performer:
self.performer = performer
if title:
self.title = title
class InputMediaDocument(InputMedia):
"""Represents a general file to be sent.
Args:
media (:obj:`str` | `filelike object` | :obj:`bytes` | :class:`pathlib.Path` | \
:class:`telegram.Document`): File to send. Pass a
file_id to send a file that exists on the Telegram servers (recommended), pass an HTTP
URL for Telegram to get a file from the Internet. Lastly you can pass an existing
:class:`telegram.Document` object to send.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
filename (:obj:`str`, optional): Custom file name for the document, when uploading a
new file. Convenience parameter, useful e.g. when sending files generated by the
:obj:`tempfile` module.
.. versionadded:: 13.1
caption (:obj:`str`, optional): Caption of the document to be sent, 0-1024 characters after
entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of parse_mode.
thumb (`filelike object` | :obj:`bytes` | :class:`pathlib.Path`, optional): Thumbnail of
the file sent; can be ignored if
thumbnail generation for the file is supported server-side. The thumbnail should be
in JPEG format and less than 200 kB in size. A thumbnail's width and height should
not exceed 320. Ignored if the file is not uploaded using multipart/form-data.
Thumbnails can't be reused and can be only uploaded as a new file.
.. versionchanged:: 13.2
Accept :obj:`bytes` as input.
disable_content_type_detection (:obj:`bool`, optional): Disables automatic server-side
content type detection for files uploaded using multipart/form-data. Always true, if
the document is sent as part of an album.
Attributes:
type (:obj:`str`): ``document``.
media (:obj:`str` | :class:`telegram.InputFile`): File to send.
caption (:obj:`str`): Optional. Caption of the document to be sent.
parse_mode (:obj:`str`): Optional. The parse mode to use for text formatting.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption.
thumb (:class:`telegram.InputFile`): Optional. Thumbnail of the file to send.
disable_content_type_detection (:obj:`bool`): Optional. Disables automatic server-side
content type detection for files uploaded using multipart/form-data. Always true, if
the document is sent as part of an album.
"""
__slots__ = (
'caption_entities',
'media',
'thumb',
'caption',
'parse_mode',
'type',
'disable_content_type_detection',
)
def __init__(
self,
media: Union[FileInput, Document],
thumb: FileInput = None,
caption: str = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
disable_content_type_detection: bool = None,
caption_entities: Union[List[MessageEntity], Tuple[MessageEntity, ...]] = None,
filename: str = None,
):
self.type = 'document'
self.media = parse_file_input(media, Document, attach=True, filename=filename)
if thumb:
self.thumb = parse_file_input(thumb, attach=True)
if caption:
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
self.disable_content_type_detection = disable_content_type_detection
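# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how these InputMedia classes are typically
# combined into an album and sent with ``Bot.send_media_group``. The token,
# chat id and file names below are placeholders, not values from this module.
#
# from telegram import Bot, InputMediaPhoto, InputMediaVideo
#
# bot = Bot(token='<bot-token>')  # hypothetical token
# with open('photo.jpg', 'rb') as photo, open('clip.mp4', 'rb') as clip:
#     album = [
#         InputMediaPhoto(photo, caption='First item'),
#         InputMediaVideo(clip, supports_streaming=True),
#     ]
#     bot.send_media_group(chat_id=123456, media=album)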
|
|
"""Tools for manipulating of large commutative expressions."""
import numbers
from collections import defaultdict
from ..utilities import default_sort_key, ordered, variations
from ..utilities.iterables import common_prefix, common_suffix
from .add import Add
from .basic import Basic, preorder_traversal
from .compatibility import is_sequence, iterable
from .containers import Dict, Tuple
from .coreerrors import NonCommutativeExpression
from .expr import Expr
from .mul import Mul, _keep_coeff
from .numbers import I, Integer, Number, Rational, oo
from .power import Pow
from .symbol import Dummy
from .sympify import sympify
def _isnumber(i):
return isinstance(i, (numbers.Integral, float)) or i.is_Number
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
This is strictly only valid if the exponent from which
the integer is extracted is itself an integer or the
base is positive. These conditions are assumed and not
checked here.
Examples
========
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.denominator))
exp = exp.numerator
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp == -1:
base, exp = Pow(base, tail), -1
elif exp != 1:
tail = _keep_coeff(Rational(1, exp.denominator), tail)
base, exp = Pow(base, tail), exp.numerator
else:
base, exp = expr, 1
return base, exp
class Factors:
"""Efficient representation of ``f_1*f_2*...*f_n``."""
def __init__(self, factors=None):
"""Initialize Factors from dict or expr.
Examples
========
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset({2, x})
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (numbers.Integral, float)):
factors = sympify(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors in (None, 1):
factors = {}
elif factors == 0:
factors = {Integer(0): Integer(1)}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[Integer(-1)] = Integer(1)
n = -n
if n.is_Float or n.is_Integer or n is oo:
factors[n] = Integer(1)
elif n.is_Rational and n != 1:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left as they are.
if n.numerator != 1:
factors[Integer(n.numerator)] = Integer(1)
factors[Integer(n.denominator)] = Integer(-1)
else: # pragma: no cover
raise ValueError(f'Expected Float|Rational|Integer, not {n}')
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: Integer(1)}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
if i:
factors[I] = Integer(1)*i
if nc:
factors[Mul(*nc, evaluate=False)] = Integer(1)
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = []
for k in factors:
if k in (I, -1, 1):
handle.append(k)
if handle:
i1 = Integer(1)
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 != 1 or i1.is_Float:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a == -1 and not a.is_Float:
factors[a] = Integer(1)
elif a is I:
factors[I] = Integer(1)
elif a.is_Pow:
if -1 not in factors:
factors[Integer(-1)] = Integer(0)
factors[Integer(-1)] += a.exp
elif a == 1:
factors[a] = Integer(1)
elif a == -1:
factors[-a] = Integer(1)
factors[Integer(-1)] = Integer(1)
else: # pragma: no cover
raise RuntimeError(f'unexpected factor in i1: {a}')
self.factors = factors
self.gens = frozenset(factors)
def __hash__(self):
keys = tuple(ordered(self.factors))
values = tuple(self.factors[k] for k in keys)
return hash((keys, values))
def __repr__(self):
return 'Factors({%s})' % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self):
"""
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and 0 in f
@property
def is_one(self):
"""
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self):
"""Return the underlying expression.
Examples
========
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
b, e = factor.as_base_exp()
if isinstance(exp, int):
e = _keep_coeff(Integer(exp), e)
elif isinstance(exp, Rational):
e = _keep_coeff(exp, e)
else:
e *= exp
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def mul(self, other):
"""Return Factors of ``self * other``.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(Integer(0))
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
The only differences between this and the ``div`` method are that this
is 1) optimized for the case when there are few factors in common and
2) it does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(), Factors(Integer(0))
if self.is_zero:
return Factors(Integer(0)), Factors()
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
assert r
self_factors[factor] = r
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
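# Illustrative example (editor's addition, not from the original source;
# assumes commutative symbols x, y, z):
#     Factors(x*y).normal(Factors(y*z)) -> (Factors({x: 1}), Factors({z: 1}))
# i.e. the shared factor y is removed from both sides, and a zero ``other``
# is handled without raising.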
def div(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(Integer(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-base powers:
>>> n, d = Factors(2**(2*x + 2)).div(Integer(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return Factors(Integer(0)), Factors()
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
else:
exp = -d
rem[factor] = exp
else:
r = quo[factor].extract_additively(exp)
if r is not None:
assert r
quo[factor] = r
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
else:
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other):
"""Return numerator Factor of ``self / other``.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other):
"""Return denominator Factors of ``self / other``.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other):
"""Return self raised to a non-negative integer power.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, numbers.Integral) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError(f'expected non-negative integer, got {other}')
def gcd(self, other):
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt:
factors[factor] = exp
elif lt is False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other):
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(Integer(0))
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other):
return self.mul(other)
def __divmod__(self, other):
return self.div(other)
def __truediv__(self, other):
return self.quo(other)
def __mod__(self, other):
return self.rem(other)
def __pow__(self, other):
return self.pow(other)
def __eq__(self, other):
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
class Term:
"""Efficient representation of ``coeff*(numer/denom)``."""
def __init__(self, term, numer=None, denom=None):
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def as_expr(self):
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other):
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self):
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other):
return self.mul(other.inv())
def pow(self, other):
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other):
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other):
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other):
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __truediv__(self, other):
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
def __pow__(self, other):
if isinstance(other, numbers.Integral):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
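# Illustrative example (editor's addition, not from the original source;
# assumes commutative symbols x, y, z):
#     Term(4*x*y**2/z) -> coeff 4, numer Factors({x: 1, y: 2}),
#     denom Factors({z: 1})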
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
content has already been extracted.
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, (t for t in terms if t)))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return Integer(0), Integer(0), Integer(1)
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(Integer(1)).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
``terms`` can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
expression. When True (default), all numerical denominators will be cleared;
when False the denominators will be cleared only if all terms had numerical
denominators other than 1.
``fraction``, when True (default), will put the expression over a common
denominator.
Examples
========
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, fraction=False, clear=False)
(x + 2/x)/(2*y)
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, diofant.polys.polytools.terms_gcd
"""
def mask(terms):
"""Replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them.
"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
return a.func(*[handle(i) for i in a.args])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
This only applies when there is a single Add that the coefficient
multiplies:
>>> factor_terms(x*y/2 + y, clear=True)
y*(x + 2)/2
>>> factor_terms(x*y/2 + y, clear=False) == _
True
If a -1 is all that can be factored out, to *not* factor it out, the
flag ``sign`` must be False:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, diofant.polys.polytools.terms_gcd
"""
def do(expr):
is_iterable = iterable(expr)
if not isinstance(expr, Basic) and is_iterable:
return type(expr)([do(i) for i in expr])
if expr.is_Atom:
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple(do(i) for i in args)
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if all(a.as_coeff_Mul()[0] < 0 for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
_, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
``name``, if given, is the name that will be used with numbered Dummy
variables that will replace the non-commutative objects and is mainly
used for doctesting purposes.
Notes
=====
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> A, B, C = symbols('A B C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(-x**2 + _d0**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, None, [A, B])
If there is an object that:
- doesn't contain nc-symbols
- but has arguments which derive from Expr
- and doesn't define an _eval_is_commutative routine
then the is_commutative test will give False or None rather than True. Such
objects are also removed by this routine:
>>> eq = (1 + Mul(Expr(), Expr(), evaluate=False))
>>> eq.is_commutative is None
True
>>> _mask_nc(eq, 'd')
(_d0**2 + 1, {_d0: Expr()}, [])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from .symbol import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for a in pot:
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_Symbol:
nc_syms.add(a)
elif not (a.is_Add or a.is_Mul or a.is_Pow):
if all(s.is_commutative for s in a.free_symbols):
rep.append((a, Dummy()))
else:
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, {v: k for k, v in rep} or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
Examples
========
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from ..polys import factor, gcd
from ..simplify.simplify import powsimp
def _pemexpand(expr):
"""Expand with the minimal set of hints necessary to check the result."""
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = Integer(1)
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = Integer(1)
if c != 1:
hit = True
c, g = c.as_coeff_Mul()
if g != 1:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
cc[0] = cc[0]/c
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
# a factor of -1, e.g. (A**2 - B**2) -- {A: y, B: x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
from nilmtk import DataSet
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 5000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 1024
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['washer dryer', 'washing machine'],
'hair straighteners',
'television',
'dish washer',
['fridge freezer', 'fridge', 'freezer']
],
max_appliance_powers=[2400, 500, 200, 2500, 200],
# max_input_power=200,  # sum of max_appliance_powers is 5800
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 60, 1800, 60],
min_off_durations=[600, 12, 12, 1800, 12],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
# input_stats =
# {'std': array([ 0.17724811], dtype=float32), 'mean': array([ 0.13002439], dtype=float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
20000: 1e-3,
40000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_o(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
learning_rate=1e-1,
learning_rate_changes_by_iteration={}
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'label': 'dense1',
'type': DenseLayer,
'num_units': 1021,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 1021 * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, 1021, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
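# The stack above is, in effect, a 1-D convolutional encoder/decoder:
# a length-4 valid Conv1D shortens each 1024-sample sequence to 1021 steps,
# the dense layers narrow to a 1021-unit bottleneck and widen again, and the
# full-mode DeConv1D maps back to a single output channel of length
# 1024 (1021 + 4 - 1).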
net = Net(**net_dict_copy)
return net
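# The remaining lines build and compile the network, load the parameters saved
# at iteration 50000, disaggregate the first 131072 mains samples from
# UK-DALE building 1, and plot the estimate against the measured mains and
# washer dryer series.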
os.chdir('/data/dk3810/figures/e446o/')
net = exp_o('e446o')
net.compile()
net.load_params(50000, '/data/dk3810/figures/e446o/e446o.hdf5')
dataset = DataSet('/data/dk3810/ukdale.h5')
dataset.set_window("2013-06-01", "2014-07-01")
elec = dataset.buildings[1].elec
elec.use_alternative_mains()
mains = elec.mains().power_series_all_data()
washer = elec['washer dryer'].power_series_all_data()
N = 131072
estimates = disaggregate(mains.values[:N], net)
fig, axes = plt.subplots(3, 1, sharex=True)
axes[0].plot(mains[:N].index, estimates)
axes[1].plot(mains[:N].index, mains[:N])
axes[2].plot(washer[:N].index, washer[:N])
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e447.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
|
#!/usr/bin/env python
import sys,os
import pandas as pd
import re
import math
def main():
"""
NAME
squidm_magic.py
DESCRIPTION
Converts SQUID microscopy files into MagIC format files.
This program currently supports only specimens from a single sample.
Method codes are applied to all rows in each table. To add method codes
to individual rows, edit the MagIC file after this program creates it.
A standard MagIC file is created for the location through specimen tables,
along with measurement data from the model fits. The SQUID microscopy
measurement data is placed in individual files for each experiment in the
compact MagIC file format. These measurement files are placed in a
directory called "measurements".
This program requires a specific directory/file structure to function.
The top directory should contain directories named for each of the SQUID
microscopy scan groups. This name will be used as the specimen name in
the MagIC specimen table that is created by this program. In each scan
directory there should be directories labeled "demag", "data", and "images". The
data directory should contain the .bz, .inf, and .fits files for each SQUID
microscopy scan. The .bz, .inf, and .fits files must have the same name. The
demag directory should have a .sam file that has the same name as the
name of the directory 2 levels up. That is, if the main directory is ZirconRun2,
then the .sam file should be named ZirconRun2.sam. There should also be
the CIT formatted files with the model demag data in them along with an
associated _fits.txt file that has information about how well the model
dipole fits the data. The images directory should contain any human-viewable
image files (.jpg, .png, .gif, .pdf, etc.) that will be uploaded to MagIC along
with the data.
The .fits file format is: moment in emu, declination, inclination, height, residuals
An example of the text in a .fits file:
1.51e-10,54.36,220.92,0.000429,0.4214
Example file directory tree:
ZirconRun1 -- (file hierarchy)
ZirconRun2 -- data -- run2N_140C_100k.bz
-- run2N_140C_100k.fits
-- run2N_140C_100k.inf
-- run2N_173C_100k.bz
-- run2N_173C_100k.fits
-- run2N_173C_100k.inf
-- demag -- 57-1-3
-- 57-19-12
-- ZirconRun2.sam
-- images -- run2N_140C_100k.pdf
-- run2S_89G_1M.pdf
ZirconRun3 -- (file hierarchy)
SYNTAX
squidm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-meas_name_num: set the starting number for the measurement name. Default:1
-location: specify location/study name
-location_type: specify location_type
see https://www.earthref.org/vocabularies/controlled for list
-geologic_classes: specify geologic_classes (":" between multiple entries).
see https://www.earthref.org/vocabularies/controlled for list
-lithologies: specify lithologies. (":" between multiple entries)
see https://www.earthref.org/vocabularies/controlled for list
-lat: specify the latitude of the site.
-lon: specify longitude of the site.
-age: specify the age of the site.
One must have an age defined. age_low and age_high can be used in
addition to or in place of age.
-age_sigma: specify the one sigma error on the age.
-age_low: specify the low bound for the age.
-age_high: specify the high bound for the age.
-age_unit: specify the age unit. ka, Ma, Ga are some examples.
see https://www.earthref.org/vocabularies/controlled for list
-citations: list of citations (":" between entries). default: "This study".
"This study" can be used for the study this MagIC contribution
will be associated with. Will be added when the data is published.
Use DOIs for other studies.
-site: site name for the sample that the scan slides were made from
-geologic_types: geologic types of the site. (":" between multiple entries)
-sample: sample name from which the slides were made
-loc_method_codes: method_codes used for all locations
(":" between multiple entries)
Recommended
-site_method_codes: method_codes used for all sites
(":" between multiple entries)
Required
-samp_method_codes: method_codes used for all samples
(":" between multiple entries)
Required
-spec_method_codes: method_codes used for all specimens. Put LP-NOMAG method code last, if used.
(":" between multiple entries)
Required
-meas_method_codes: method_codes used for all measurements
(":" between multiple entries)
LP-SQUIDM will be automatically added to the measurements method
code list if not already in the provided list of codes
-instrument_codes: used to identify the instrument that made the measurement.
Exact instrument name preferred, not type.
(":" between multiple entries)
-model_height_name: name from the MagIC "Derived Values" controlled vocabulary for the model used
to calculate the model height.
Should correspond to the model_residuals_name
-model_residuals_name: name from the MagIC "Derived Values" controlled vocabulary for the model used
to calculate the model residuals.
Should correspond to the model_height_name
-model_doi: doi reference for the model used to calculate the model height, and residuals
-A: don't average replicate measurements
-multi_samples: flag used to indicate that the MagIC files should not be removed upon finishing. This leaves the
MagIC files to be concatenated by another program when there are multiple samples in
the study.
-meas_num: set the starting measurement file number. default:1
-labfield: the field strength that the sample was magnetized for an in-field step
in microTesla. default:0.0
-phi: Angle between the specimen x-y plane and the dc field direction.
Positive toward the positive z direction.
-theta: Angle of the dc field direction when projected into the x-y plane.
Positive x-axis is 0 and increasing toward the positive y-axis.
-ncn NCON: specify naming convention for the CIT sample files.
Sample naming convention (NCON):
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in sitename column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize yourself or e-mail [email protected] for help.
Example command for the example data file. Data from Weiss et al., 2018 (doi:10.1130/G39938.1):
squidm_magic.py -location "Jack Hills" -location_type "Outcrop" -geologic_classes "Metamorphic" -lithologies "Metaconglomerate" -geologic_types "Single Crystal" -lat "-26" -lon 117 -age_low 0.8 -age_high 2.6 -age_unit Ga -citations "10.1130/G39938.1" -site "Erawandoo Hill" -loc_method_codes "GM-UPB" -site_method_codes "GM-UPB" -samp_method_codes "SC-SQUIDM" -spec_method_codes "SC-SQUIDM" -geologic_types "Single Crystal" -sample RSES-57 -ncn 5 -instrument_codes "MIT SQUID microscope" -model_height_name "SQUID Microscopy Model Height Lima And Weiss 2016" -model_residuals_name "SQUID Microscopy Residuals Lima And Weiss 2016" -model_doi "10.1002/2016GC006487" -labfield 50.0 -phi 0.0 -theta 90 -A
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-meas_name_num' in sys.argv:
ind=sys.argv.index('-meas_name_num')
meas_name_num=int(sys.argv[ind+1])
else:
meas_name_num=1
if '-location' in sys.argv:
ind=sys.argv.index('-location')
location=sys.argv[ind+1]
else:
print("The location/study name must be set with the -location flag")
exit()
if '-location_type' in sys.argv:
ind=sys.argv.index('-location_type')
location_type=sys.argv[ind+1]
else:
print("The location_type name must be set with the -location_type flag")
exit()
if '-geologic_classes' in sys.argv:
ind=sys.argv.index('-geologic_classes')
geologic_classes=sys.argv[ind+1]
else:
print("The geologic classes must be set with the -geologic_classes flag")
exit()
if '-lithologies' in sys.argv:
ind=sys.argv.index('-lithologies')
lithologies=sys.argv[ind+1]
else:
print("The litothologies must be set with the -lithologies flag")
exit()
if '-lat' in sys.argv:
ind=sys.argv.index('-lat')
lat=sys.argv[ind+1]
else:
print("The latitude must be set with the -lat flag")
exit()
if '-lon' in sys.argv:
ind=sys.argv.index('-lon')
lon=sys.argv[ind+1]
else:
print("The longitude must be set with the -lon flag")
exit()
if '-age' in sys.argv:
ind=sys.argv.index('-age')
age=sys.argv[ind+1]
else:
age=""
if '-age_sigma' in sys.argv:
ind=sys.argv.index('-age_sigma')
age_sigma=sys.argv[ind+1]
else:
age_sigma=""
if '-age_low' in sys.argv:
ind=sys.argv.index('-age_low')
age_low=sys.argv[ind+1]
else:
age_low=""
if '-age_high' in sys.argv:
ind=sys.argv.index('-age_high')
age_high=sys.argv[ind+1]
else:
age_high=""
if '-age_unit' in sys.argv:
ind=sys.argv.index('-age_unit')
age_unit=sys.argv[ind+1]
else:
print("The age unit must be set with the -ageunit flag")
exit()
if '-citations' in sys.argv:
ind=sys.argv.index('-citations')
citations=sys.argv[ind+1]
else:
citations="This study"
if '-loc_method_codes' in sys.argv:
ind=sys.argv.index('-loc_method_codes')
loc_method_codes=sys.argv[ind+1]
else:
loc_method_codes=""
if '-site_method_codes' in sys.argv:
ind=sys.argv.index('-site_method_codes')
site_method_codes=sys.argv[ind+1]
else:
print("method code(s) for the site must be set with the -site_method_code flag")
exit()
if '-samp_method_codes' in sys.argv:
ind=sys.argv.index('-samp_method_codes')
samp_method_codes=sys.argv[ind+1]
else:
print("method code(s) for the sample must be set with the -samp_method_code flag")
exit()
if '-spec_method_codes' in sys.argv:
ind=sys.argv.index('-spec_method_codes')
spec_method_codes=sys.argv[ind+1]
else:
print("method code(s) for the specimen must be set with the -specimen_method_code flag")
exit()
if '-meas_method_codes' in sys.argv:
ind=sys.argv.index('-meas_method_codes')
meas_method_codes=sys.argv[ind+1]
if 'LP-SQUIDM' not in meas_method_codes:
meas_method_codes=meas_method_codes+':LP-SQUIDM'
else:
meas_method_codes='LP-SQUIDM'
if '-instrument_codes' in sys.argv:
ind=sys.argv.index('-instrument_codes')
instrument_codes=sys.argv[ind+1]
else:
instrument_codes=""
if '-model_height_name' in sys.argv:
ind=sys.argv.index('-model_height_name')
model_height_name=sys.argv[ind+1]
else:
print("The model height name must be set with the -model_height_name flag")
exit()
if '-model_residuals_name' in sys.argv:
ind=sys.argv.index('-model_residuals_name')
model_residuals_name=sys.argv[ind+1]
else:
print("The model residuals name must be set with the -model_residuals_name flag")
exit()
if '-model_doi' in sys.argv:
ind=sys.argv.index('-model_doi')
model_doi=sys.argv[ind+1]
else:
print("The model doi must be set with the -model_doi flag")
exit()
if '-site' in sys.argv:
ind=sys.argv.index('-site')
site=sys.argv[ind+1]
else:
print("The site name must be set with the -site flag")
exit()
if '-geologic_types' in sys.argv:
ind=sys.argv.index('-geologic_types')
geologic_types=sys.argv[ind+1]
else:
print("The geologic types must be set with the -geologic_types flag")
exit()
if '-sample' in sys.argv:
ind=sys.argv.index('-sample')
sample=sys.argv[ind+1]
else:
print("The site name must be set with the -sample flag")
exit()
if '-oe' in sys.argv:
oe=' -oe '
else:
oe=''
if '-A' in sys.argv:
average='-A'
else:
average=''
if '-multi_samples' in sys.argv:
multi_samples=True
else:
multi_samples=False
if '-meas_num' in sys.argv:
ind=sys.argv.index('-meas_num')
meas_num=int(sys.argv[ind+1])
else:
if multi_samples:
if os.path.isfile('../last_measurement_number'):
f=open('../last_measurement_number','r')
meas_num=int(f.readline())
f.close()
else:
meas_num=1
else:
meas_num=1
if '-labfield' in sys.argv:
ind=sys.argv.index('-labfield')
labfield=sys.argv[ind+1]
else:
labfield='0.0'
if '-phi' in sys.argv:
ind=sys.argv.index('-phi')
phi=sys.argv[ind+1]
else:
phi='0.0'
if '-theta' in sys.argv:
ind=sys.argv.index('-theta')
theta=sys.argv[ind+1]
else:
theta='0.0'
if '-ncn' in sys.argv:
ind=sys.argv.index('-ncn')
ncn=sys.argv[ind+1]
else:
print("Setting the sample name convention with the -ncn flag is required")
exit()
# Run cit_magic.py on all slides to process the pseudo-Thellier data
# Format and combine the MagIC files from each slide into one MagIC file
# Create measurementXX.txt files for each slide scan by translating the data into the MagIC format
print("start")
os.system("rm locations.txt sites.txt samples.txt specimens.txt measurements.txt")
os.system("rm *.txt ") # for debugging
os.system("rm -r images")
os.system("rm -r measurements")
dir_list=os.listdir()
print('dir_list=',sorted(dir_list))
slide_dir_list=[]
image_dir_list=[]
specimen_list=[]
# create locations table
df=pd.DataFrame(columns=["location","location_type","geologic_classes","lithologies","lat_n","lat_s","lon_w","lon_e","age_unit","citations","method_codes"],data=[[location,location_type,geologic_classes,lithologies,lat,lat,lon,lon,age_unit,citations,loc_method_codes]])
if age!="":
df["age"]=[age]
if age_high!="":
df["age_high"]=[age_high]
if age_low!="":
df["age_low"]=[age_low]
print(df)
df.to_csv("locations.txt",sep='\t',index=False)
add_head("locations")
# create sites table
df=pd.DataFrame(columns=["site","location","geologic_classes","geologic_types","lithologies","lat","lon","age_unit","citations","method_codes"],data=[[site,location,geologic_classes,geologic_types,lithologies,lat,lon,age_unit,citations,site_method_codes]])
if age!="":
df["age"]=[age]
if age_high!="":
df["age_high"]=[age_high]
if age_low!="":
df["age_low"]=[age_low]
print(df)
df.to_csv("sites.txt",sep='\t',index=False)
add_head("sites")
# create samples table
df=pd.DataFrame(columns=["sample","site","lat","lon","geologic_classes","geologic_types","lithologies","citations","method_codes"],data=[[sample,site,lat,lon,geologic_classes,geologic_types,lithologies,citations,samp_method_codes]])
df.to_csv("samples.txt",sep='\t',index=False)
add_head("samples")
meas_num=meas_num
for dir in sorted(dir_list):
if dir[0] == '.': # skip . files added by MacOS
continue
elif dir == 'command': # skip command file
continue
elif dir == 'log': # skip log file - used during debugging
continue
specimen=dir
slide_dir_list.append(dir+'/demag/')
specimen_list.append(dir)
# print("specimen_list",specimen_list)
# create images.txt file when images directories are present
if os.path.isdir(dir+'/images/'):
image_dir_list.append(dir+'/images/')
os.chdir(dir+'/images')
if os.path.isfile("images.txt"):
os.system("rm images.txt")
image_file_names=os.listdir()
f_images=open('images.txt','w')
f_images.write('tab\timages\n')
f_images.write('specimen\tfile\ttype\ttitle\tkeywords\n')
for file_name in image_file_names:
title_split=file_name.split(".")
title=title_split[0]
f_images.write(dir+'\t'+file_name+'\tScanning SQUID Microscopy\t'+title+'\tScanning SQUID Microscopy\n')
f_images.close()
print("image_dir_list",image_dir_list)
os.chdir('../..')
# create MagIC files from cit files
os.chdir(dir+'/demag')
command='cit_magic.py -ncn ' + ncn + ' -f ' + dir + '.sam -loc "' + location + '" -sn "' + site + '" -sampname "' + sample + '" -dc ' + labfield + ' ' + phi + ' ' + theta + ' ' + average
if spec_method_codes != "":
command+=' -mcd ' + spec_method_codes
print(command)
os.system(command)
# add info to specimens table
df=pd.read_csv("specimens.txt",sep="\t",header=1)
df=append_to_column(df,"method_codes",spec_method_codes)
df=update_column(df,"citations",citations)
df=update_column(df,"geologic_classes",geologic_classes)
df=update_column(df,"lithologies",lithologies)
df=update_column(df,"geologic_types",geologic_types)
df.to_csv("specimens.txt",sep='\t',index=False)
add_head("specimens")
# add info to measurements table
df=pd.read_csv("measurements.txt",sep="\t",header=1)
df=append_to_column(df,"method_codes",meas_method_codes)
df=update_column(df,"citations",citations)
df=update_column(df,"instrument_codes",instrument_codes)
df.to_csv("measurements.txt",sep='\t',index=False)
add_head("measurements")
# Create the large MagIC measurement files for the raw QDM data scans
os.chdir('../data')
os.system('rm measurements*.txt')
meas_num,meas_name_num=convert_squid_data(specimen,citations,meas_num,meas_method_codes,meas_name_num,model_height_name,model_residuals_name,model_doi)
os.system('mv measurements*.txt ../../')
os.chdir('../../')
# move all the measurement files to one folder
os.system("mkdir measurements")
os.system("mv measurements[0-9]*.txt measurements")
# Combine the images tables and put the images in one folder
image_files=""
print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
print("image dir list=",image_dir_list)
print("XXXXXXXXXXXXXXXXXXXXXXXXXXX")
for dir in image_dir_list:
image_files+=dir+ "images.txt "
os.system("combine_magic.py -F images.txt -f " + image_files)
os.mkdir("images")
for dir in image_dir_list:
os.system("cp " + dir + "* images")
# Create files for combining into sites and samples tables for the image info
tab="\t"
geologic_types=geologic_types
f=open("images/sites.txt","w")
f.write("tab\tsites\n")
f.write("site\tlocation\tlat\tlon\tcitations\tgeologic_classes\tlithologies\tage_high\tage_low\tage_unit\tmethod_codes\tgeologic_types\n")
f.write(site + tab + location + tab + lat + tab + lon + tab + citations + tab + geologic_classes + tab + lithologies + tab +age_high + tab + age_low + tab + age_unit + tab + site_method_codes + tab + geologic_types + "\n")
f.close()
f=open("images/samples.txt","w")
f.write("tab\tsamples\n")
f.write("sample\tsite\tgeologic_classes\tgeologic_types\tlithologies\tcitations\tmethod_codes\tlat\tlon\n")
f.write(sample + tab + site + tab + geologic_classes + tab + geologic_types + tab + lithologies + tab + citations + tab + samp_method_codes + tab + lat + tab + lon + "\n")
f.close()
print("Creating specimens header file for images specimens")
f=open("images/specimens.txt","w")
f.write("tab\tspecimens\n")
f.write("specimen\tsample\tcitations\tmethod_codes\tgeologic_classes\tgeologic_types\tlithologies\n")
f.close()
# Create files lists for combining the MagIC data files
print("slide dir list=",slide_dir_list)
site_files="sites.txt images/sites.txt "
samp_files="samples.txt images/samples.txt "
spec_files="specimens.txt images/specimens.txt "
meas_files=""
for dir in slide_dir_list:
spec_files+=dir+"specimens.txt "
meas_files+=dir+"measurements.txt "
# Also add the specimen names for the scan slides to the specimen table
for fdf in specimen_list:
f=open("images/specimens.txt","a")
f.write(fdf + tab + sample + tab + citations + tab + spec_method_codes + tab + geologic_classes + tab + geologic_types + tab + lithologies + "\n")
f.close()
os.system("combine_magic.py -F specimens.txt -f " + spec_files)
os.system("combine_magic.py -F measurements.txt -f " + meas_files)
os.system("upload_magic.py")
# Remove MagIC files
for dir in slide_dir_list:
os.system("rm " + dir + "locations.txt")
os.system("rm " + dir + "sites.txt")
os.system("rm " + dir + "samples.txt")
os.system("rm " + dir + "specimens.txt")
os.system("rm " + dir + "measurements.txt")
print("multi_samples=",multi_samples)
if multi_samples == False:
os.system("rm locations.txt sites.txt samples.txt specimens.txt measurements.txt images.txt")
os.system("rm images/sites.txt images/samples.txt images/specimens.txt images/images.txt")
f=open("../last_measurement_number","w")
f.write(str(meas_num))
f.close()
print("end")
return()
def convert_squid_data(specimen,citations,meas_num,meas_method_codes,meas_name_num,model_height_name,model_residuals_name,model_doi):
# Take the SQUID magnetometer files and make a MagIC measurement file. This data will not be uploaded
# in the contribution MagIC data file due to its large size, but will be available for download.
# These have to be uploaded by hand for now.
# Each scan's data is put in a separate measurements.txt file and the files are put in a
# separate directory.
file_list=os.listdir()
print(sorted(file_list))
for file in sorted(file_list):
if file[0] == '.': # skip . files added by MacOS
continue
if '.inf' in file: # skip .inf files; they are read while processing the matching .bz file, which needs all the data to create the measurements file
continue
if '.fits' in file: # skip .fits files; they are read while processing the matching .bz file, which needs all the data to create the measurements file
continue
print('file=',file)
data_name=file
info_name=file[:-3]+ '.inf'
# Parse the .inf file
info = open(info_name, encoding="utf8", errors='ignore') # data files have some non-utf8 characters that are ignored
line=info.readline()
line=info.readline()
line=info.readline()
initial_corner=line.split('(')
x_start=float(initial_corner[1].split()[0])
y_start=float(initial_corner[1].split()[2])
x_start=1e-3*x_start # convert mm to meters
y_start=1e-3*y_start # convert mm to meters
print("x_start=",x_start)
print("y_start=",y_start)
line=info.readline()
end_corner=line.split('(')
x_end=float(end_corner[1].split()[0])
y_end=float(end_corner[1].split()[2])
x_end=1e-3*x_end # convert mm to meters
y_end=1e-3*y_end # convert mm to meters
print("x_end=",x_end)
print("y_end=",y_end)
line=info.readline()
line=info.readline()
line=info.readline()
line=info.readline()
x_step_line=line.split()
x_step=float(x_step_line[3])
x_step=1e-6*x_step #convert micrometers to meters
print("x_step",x_step)
line=info.readline()
y_step_line=line.split()
y_step=float(y_step_line[3])
y_step=1e-6*y_step #convert micrometers to meters
print("y_step",y_step)
line=info.readline()
line=info.readline()
comment=line[4:-1]
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
num_points_line=line.split()
num_points=float(num_points_line[3])
comment=comment+", "+line[4:-1]
print ("num_points=",num_points)
line=info.readline()
line=info.readline()
line=info.readline()
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
line=info.readline()
line=info.readline()
line=info.readline()
calibration_factor_line=line.split()
calibration_factor=float(calibration_factor_line[2])
calibration_factor=1e-9*calibration_factor # convert nanoTesla to Tesla
comment=comment+", "+line[4:-1]
print ("calibration_factor=",calibration_factor)
line=info.readline()
line=info.readline()
line=info.readline()
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
comment=comment+", "+line[4:-1]
line=info.readline()
comment=comment+", "+line[4:-1]
print ("comment=",comment)
line=info.readline()
info.close()
experiment_name=file.split('.')
experiment_name=experiment_name[0]
# get the model height and residuals information from the .fits file
fits_name=file[:-3]+ '.fits'
f=open(fits_name,'r')
line=f.readline()
line_split=line.split(",")
# fit file values are moment (emu), inc, dec, height and residuals
height=line_split[3]
residuals=line_split[4]
residuals=residuals.strip()
# open the measurement file for writing and put the compressed headers in
mf=open('measurements'+str(meas_num)+'.txt','w')
mf.write("tab\tmeasurements\n")
mf.write('* experiment\t'+experiment_name+'\n')
mf.write('* specimen\t'+specimen+'\n')
mf.write('* standard\tu\n')
mf.write('* quality\tg\n')
mf.write('* method_codes\t'+meas_method_codes+'\n')
mf.write('* citations\t'+citations+'\n')
mf.write('* description\t'+comment+'\n')
mf.write('* derived_value\t'+model_height_name+','+height+','+model_doi+';'+model_residuals_name+','+residuals+','+model_doi+'\n')
mf.write('measurement\tmagn_z\tmeas_pos_x\tmeas_pos_y\n')
print('meas_num=', meas_num)
print('')
meas_num+=1
prog = re.compile(r"\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z") # for rounding
qdm_data=open(data_name,'r')
line=qdm_data.readline()
y=y_start
while line != "":
str_y=stringify(y*y_step)
str_y=remove_extra_digits(str_y, prog) #fix float to str problems
values=line.split()
x=x_start
for value in values:
str_x=stringify(x*x_step)
str_x=remove_extra_digits(str_x, prog)
value=float(value)*calibration_factor
# fix rounding problems with exponentials and zeros
str_value=str(value)
if 'e' in str_value:
split_value=str_value.split('e')
str_val_num=split_value[0]
if '0000000' in str_value:
str_val_num_split=str_val_num.split('0000000')
if str_val_num_split[1] == '': str_val_num_split[1]='0'
if int(str_val_num_split[1]) < 10:
str_value=str_val_num_split[0]+'e'+ split_value[1]
measurement_line=str(meas_name_num)+'\t'+str_value+'\t'+str_x+'\t'+str_y+'\n'
mf.write(measurement_line)
x+=1
meas_name_num+=1
y+=1
line = qdm_data.readline()
qdm_data.close()
mf.close()
return(meas_num,meas_name_num)
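# Illustrative sketch (placeholder values, not real data) of one compact MagIC
# measurement file written by convert_squid_data() above; columns are tab-separated
# and the "* key<TAB>value" lines are the compact-format constant columns:
#
#   tab             measurements
#   * experiment    run2N_140C_100k
#   * specimen      ZirconRun2
#   * standard      u
#   * quality       g
#   * method_codes  LP-SQUIDM
#   * citations     This study
#   * description   <comment lines gathered from the .inf file>
#   * derived_value <model_height_name>,<height>,<model_doi>;<model_residuals_name>,<residuals>,<model_doi>
#   measurement     magn_z      meas_pos_x  meas_pos_y
#   1               -1.2e-09    0           0
#   2               -1.1e-09    0.0001      0
#   ...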
def update_column(df,column,value):
#add the column with all the same values to a DataFrame
column_values = []
for i in df.iterrows():
column_values.append(value)
# print ("column=", column)
# print ("column_values=", column_values)
df[column] = column_values
return(df)
def append_to_column(df,column,value):
# add value to all of the values in column except when the value already is in the original value
for index, row in df.iterrows():
value_list = value.split(':')
for method_code in value_list:
if method_code not in df.loc[index,column]:
df.loc[index,column]= method_code + ":" + df.loc[index,column]
return(df)
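# Illustrative example (hypothetical values): with value="SC-SQUIDM:LP-NOMAG",
# append_to_column(df, "method_codes", value) turns a cell containing "LT-AF-Z" into
# "LP-NOMAG:SC-SQUIDM:LT-AF-Z", while a cell that already contains "SC-SQUIDM" only
# gains the missing "LP-NOMAG" code.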
def add_head(table):
# Add the the magic file format header to a data file given the table name
file_name=table+".txt"
f=open(file_name,"r")
f_before=f.read()
f.close()
f_after="tab\t" + table+"\n"+f_before
f=open(file_name,"w")
f.write(f_after)
f.close()
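# Illustrative example: df.to_csv("sites.txt", sep='\t', index=False) writes a file that
# starts with the column header line; add_head("sites") then prepends the MagIC table
# marker so the file begins:
#   tab     sites
#   site    location    ...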
def stringify(x):
# float --> string,
# truncating floats like 3.0 --> 3
if isinstance(x, float):
if x.is_integer():
#print('{} --> {}'.format(x, str(x).rstrip('0').rstrip('.')))
return str(x).rstrip('0').rstrip('.')
return(str(x))
# keep strings as they are,
# unless it is a string like "3.0",
# in which case truncate that too
if isinstance(x, str):
try:
float(x)
if x.endswith('0'):
if x.rstrip('0').endswith('.'):
#print('{} --> {}'.format(x, x.rstrip('0').rstrip('.')))
return x.rstrip('0').rstrip('.')
except (ValueError, TypeError):
pass
# integer --> string
if isinstance(x, int):
return str(x)
# if it is not int/str/float, just return as is
return x
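# Illustrative behaviour of stringify() (derived from the code above):
#   stringify(3.0)   -> "3"     trailing ".0" dropped
#   stringify(2.5)   -> "2.5"
#   stringify("3.0") -> "3"     numeric-looking strings are truncated the same way
#   stringify(7)     -> "7"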
def remove_extra_digits(x, prog):
"""
Remove extra digits
x is a string,
prog is always the following '_sre.SRE_Pattern':
prog = re.compile(r"\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z").
However, it is compiled outside of this sub-function
for performance reasons.
"""
if not isinstance(x, str):
return x
result = prog.match(x)
if result:
decimals = result.string.split('.')[1]
result = result.string
if decimals[-3] == '0':
result = x[:-2].rstrip('0')
if decimals[-3] == '9':
result = x[:-2].rstrip('9')
try:
last_digit = int(result[-1])
result = result[:-1] + str(last_digit + 1)
except ValueError:
result = float(result[:-1]) + 1
#if result != x:
# print('changing {} to {}'.format(x, result))
return result
return x
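# Illustrative behaviour of remove_extra_digits() with the pattern compiled in
# convert_squid_data():
#   remove_extra_digits("0.10000000000002", prog) -> "0.1"    run of zeros trimmed
#   remove_extra_digits("0.29999999999998", prog) -> "0.3"    run of nines rounded up
#   remove_extra_digits("0.125", prog)            -> "0.125"  no match, returned unchanged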
def do_help():
"""
returns help string of script
"""
return __doc__
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestCase(base.BaseOVSLinuxTestCase):
# TODO(twilson) So far, only ovsdb-related tests are written. It would be
# good to also add the openflow-related functions
def setUp(self):
super(OVSBridgeTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
def create_ovs_port(self, *interface_attrs):
# Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
attrs = collections.OrderedDict(interface_attrs)
attrs.setdefault('type', 'internal')
port_name = net_helpers.get_rand_port_name()
return (port_name, self.br.add_port(port_name, *attrs.items()))
def create_ovs_vif_port(self, iface_id=None, mac=None,
iface_field='iface-id'):
if iface_id is None:
iface_id = base.get_rand_name()
if mac is None:
mac = base.get_rand_name()
attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
port_name, ofport = self.create_ovs_port(attrs)
return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
def test_port_lifecycle(self):
(port_name, ofport) = self.create_ovs_port(('type', 'internal'))
# ofport should always be an integer string with value -1 or > 0.
self.assertTrue(int(ofport))
self.assertTrue(int(self.br.get_port_ofport(port_name)))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual(self.br.br_name,
self.br.get_bridge_for_iface(port_name))
self.br.delete_port(port_name)
self.assertFalse(self.br.port_exists(port_name))
def test_duplicate_port_may_exist_false(self):
port_name, ofport = self.create_ovs_port(('type', 'internal'))
cmd = self.br.ovsdb.add_port(self.br.br_name,
port_name, may_exist=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_delete_port_if_exists_false(self):
cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_replace_port(self):
port_name = net_helpers.get_rand_port_name()
self.br.replace_port(port_name, ('type', 'internal'))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('internal',
self.br.db_get_val('Interface', port_name, 'type'))
self.br.replace_port(port_name, ('type', 'internal'),
('external_ids', {'test': 'test'}))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('test', self.br.db_get_val('Interface', port_name,
'external_ids')['test'])
def test_attribute_lifecycle(self):
(port_name, ofport) = self.create_ovs_port()
tag = 42
self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
self.ovs.clear_db_attribute('Port', port_name, 'tag')
self.assertEqual(self.ovs.db_get_val('Port', port_name, 'tag'), [])
self.assertEqual(self.br.get_port_tag_dict()[port_name], [])
def test_get_bridge_external_bridge_id(self):
self.ovs.set_db_attribute('Bridge', self.br.br_name,
'external_ids',
{'bridge-id': self.br.br_name})
self.assertEqual(
self.br.br_name,
self.ovs.get_bridge_external_bridge_id(self.br.br_name))
def test_controller_lifecycle(self):
controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
self.br.set_controller(controllers)
self.assertSetEqual(controllers, set(self.br.get_controller()))
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def test_set_fail_mode(self):
self.br.set_secure_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
def _assert_br_fail_mode(self, fail_mode):
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
fail_mode)
def test_set_protocols(self):
self.br.set_protocols('OpenFlow10')
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
"OpenFlow10")
def test_get_datapath_id(self):
brdev = ip_lib.IPDevice(self.br.br_name)
dpid = brdev.link.attributes['link/ether'].replace(':', '')
self.br.set_db_attribute('Bridge',
self.br.br_name, 'datapath_id', dpid)
self.assertIn(dpid, self.br.get_datapath_id())
def test_add_tunnel_port(self):
attrs = {
'remote_ip': '192.0.2.1', # RFC 5737 TEST-NET-1
'local_ip': '198.51.100.1', # RFC 5737 TEST-NET-2
}
port_name = net_helpers.get_rand_port_name()
self.br.add_tunnel_port(port_name, attrs['remote_ip'],
attrs['local_ip'])
self.assertEqual(self.ovs.db_get_val('Interface', port_name, 'type'),
'gre')
options = self.ovs.db_get_val('Interface', port_name, 'options')
for attr, val in attrs.items():
self.assertEqual(val, options[attr])
def test_add_patch_port(self):
local = net_helpers.get_rand_port_name()
peer = 'remotepeer'
self.br.add_patch_port(local, peer)
self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
'patch')
options = self.ovs.db_get_val('Interface', local, 'options')
self.assertEqual(peer, options['peer'])
def test_get_port_name_list(self):
# Note that ovs-vsctl's list-ports does not include the port created
# with the same name as the bridge
ports = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ports, set(self.br.get_port_name_list()))
def test_get_port_stats(self):
# Nothing seems to use this function?
(port_name, ofport) = self.create_ovs_port()
stats = set(self.br.get_port_stats(port_name).keys())
self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
def test_get_vif_ports(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_port_set(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(2)]
ports = self.br.get_vif_port_set()
expected = set([x.vif_id for x in vif_ports])
self.assertEqual(expected, ports)
def test_get_port_tag_dict(self):
# Simple case tested in port test_set_get_clear_db_val
pass
def test_get_vif_port_by_id(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
for vif in vif_ports:
self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
vif.vif_id)
def test_delete_ports(self):
# TODO(twilson) I intensely dislike the current delete_ports function
# as the default behavior is really delete_vif_ports(), then it acts
# more like a delete_ports() seems like it should if all_ports=True is
# passed
# Create 2 non-vif ports and 2 vif ports
nonvifs = {self.create_ovs_port()[0] for i in range(2)}
vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
self.assertSetEqual(nonvifs.union(vifs),
set(self.br.get_port_name_list()))
self.br.delete_ports()
self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
self.br.delete_ports(all_ports=True)
self.assertEqual(len(self.br.get_port_name_list()), 0)
def test_reset_bridge(self):
self.create_ovs_port()
self.br.reset_bridge()
self.assertEqual(len(self.br.get_port_name_list()), 0)
self._assert_br_fail_mode([])
def test_reset_bridge_secure_mode(self):
self.br.reset_bridge(secure_mode=True)
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
def setUp(self):
super(OVSLibTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
def test_bridge_lifecycle_baseovs(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
self.addCleanup(self.ovs.delete_bridge, name)
br = self.ovs.add_bridge(name)
self.assertEqual(br.br_name, name)
self.assertTrue(self.ovs.bridge_exists(name))
self.ovs.delete_bridge(name)
self.assertFalse(self.ovs.bridge_exists(name))
def test_get_bridges(self):
bridges = {
self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
for i in range(5)}
self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
def test_bridge_lifecycle_ovsbridge(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
br = ovs_lib.OVSBridge(name)
self.assertEqual(br.br_name, name)
# Make sure that instantiating an OVSBridge does not actually create the bridge
self.assertFalse(self.ovs.bridge_exists(name))
self.addCleanup(self.ovs.delete_bridge, name)
br.create()
self.assertTrue(self.ovs.bridge_exists(name))
br.destroy()
self.assertFalse(self.ovs.bridge_exists(name))
|
|
#!/usr/bin/env python
from sys import exit, stdin
from os import environ, path, unlink
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
from argparse import ArgumentParser
import parser
import lexer
__author__ = "Juan J. Martinez <[email protected]>"
__version__ = "0.6.3"
app_name = "JTC"
project_url = "http://www.usebox.net/jjm/jtc/"
operators = ('+', '-', '*', '/', '=', '<>', '>', '<', '>=', '<=', 'and', 'or', 'mod', 'not')
enum = ('ADD', 'SUB', 'MUL', 'DIV', 'EQ', 'NE', 'GT', 'LT', 'GE', 'LE', 'AND', 'OR', 'MOD', 'NOT')
op_trans = dict(zip(operators, enum))
class Id(object):
index = 1
ids = dict()
stack = []
#INTEGER = 1
#FLOAT = 2
#STRING = 3
ID = 10
FUNC = 11
@staticmethod
def add(lineno, type, id, uvalue=None, params=None):
Id.ids[id] = Id(lineno, type, uvalue, params)
Id.index += 1
return Id.ids[id].index
@staticmethod
def get(id):
return Id.ids[id]
@staticmethod
def enter():
Id.stack.append([Id.ids, Id.index])
Id.ids = dict()
Id.index = 1
@staticmethod
def leave():
Id.ids, Id.index = Id.stack[-1]
Id.stack = Id.stack[:-1]
@staticmethod
def exists(id):
try:
return Id.ids[id]
except KeyError:
return None
def __init__(self, lineno, type, uvalue=None, params=None):
self.index = Id.index
self.lineno = lineno
self.type = type
self.uvalue = uvalue
self.params = params
def __repr__(self):
return "%r (%r, %r)" % (self.index, self.lineno, self.type)
def func_sign(node):
params = node.sub[0].sub
cparams = ', '.join(["obj *%s" % p for p in params])
return """obj *_%s(%s)""" % (node.uvalue, cparams)
def do_func(node):
Id.enter()
# make the function available inside itself to support
# recursive calls
nparams = len(node.sub[0].sub)
Id.add(node.lineno, Id.FUNC, node.value, node.uvalue, nparams)
output = "\n" + func_sign(node) + " { st *_ctx = NULL; "
for value in node.sub[0].sub:
index = Id.add(node.sub[0].lineno, Id.ID, value)
output += "store(&_ctx, %d, %d, %s); " % (node.sub[0].lineno, index, value)
output += " %s" % do_block(node.sub[1])
# all functions return 0 by default (unless there's a user-provided return!)
output += "\nreturn o_return(&_ctx, o_int(0, 0)); }\n"
Id.leave()
return output
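# Illustrative sketch (hypothetical names and line numbers, not literal compiler output):
# for a two-parameter function whose mangled name (node.uvalue) is "f_1", defined on
# line 3 with parameters a and b, do_func() emits C of roughly this shape:
#
#   obj *_f_1(obj *a, obj *b) { st *_ctx = NULL; store(&_ctx, 3, 2, a); store(&_ctx, 3, 3, b);
#   ...statements produced by do_block()...
#   return o_return(&_ctx, o_int(0, 0)); }
#
# Index 1 in the fresh scope is taken by the function's own entry (added above to allow
# recursive calls), so the parameters land at indices 2 and 3; the final o_return() is
# the implicit "return 0" mentioned in the comment above.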
def do_if(node):
expr, block = node.sub
return """
if (o_lval(%d, %s)) { %s}
""" % (node.lineno, do_expr(expr), do_block(block))
def do_if_else(node):
expr, block, elseb = node.sub
return """
if (o_lval(%d, %s)) { %s} else { %s}
""" % (node.lineno, do_expr(expr), do_block(block), do_block(elseb))
def do_loop(node):
expr, block = node.sub
return """
while (o_lval(%d, %s)) { %s}
""" % (node.lineno, do_expr(expr), do_block(block))
def do_retrieve(node):
if not Id.exists(node.value):
print("line %d: undefined identifier %r" % (node.lineno, node.value))
exit(1)
index = Id.get(node.value).index
return "retrieve(&_ctx, %d, %d)" % (node.lineno, index)
def do_dict_index(node):
if node.sub[0].type == "string":
output = do_expr(node.sub[0])
else:
output = "o_dict_index(%d, %s)" % (node.lineno, do_expr(node.sub[0]))
return output
def do_expr(node):
output = ""
if node.type == "retrieve":
output += do_retrieve(node)
elif node.type == "numeric":
if isinstance(node.value, int):
output += "o_int(%d, %d)" % (node.lineno, node.value)
else:
output += "o_float(%d, %f)" % (node.lineno, node.value)
elif node.type == "string":
output += "o_string(%d, %s)" % (node.lineno, node.value)
elif node.type == "binop":
output += "o_op(%d, %s, %s, %s)" % (node.lineno, op_trans[node.value], do_expr(node.sub[0]), do_expr(node.sub[1]))
elif node.type == "unaop":
output += "o_op(%d, %s, %s, NULL)" % (node.lineno, op_trans[node.value], do_expr(node.sub[0]))
elif node.type == "call":
exists = Id.exists(node.value)
if exists and exists.type == Id.FUNC:
if exists.params != len(node.sub[0].sub):
print("line %d: %r expects %d parameters" % (node.lineno, node.value, exists.params))
exit(1)
params = ', '.join([do_expr(p) for p in node.sub[0].sub])
output += "_%s(%s)" % (exists.uvalue, params)
else:
print("line %d: undefined function %r" % (node.lineno, node.value))
exit(1)
elif node.type == "typeof":
output += "o_typeof(%d, %s)" % (node.lineno, do_expr(node.sub[0]))
elif node.type == "clone":
output += "o_clone(%d, %s)" % (node.lineno, do_retrieve(node.sub[0]))
elif node.type == "dict":
output += "o_dict(%d)" % node.lineno
elif node.type in ("dict-get", "dict-test"):
dict_index = do_dict_index(node)
if node.type == "dict-get":
func = "o_dict_get"
else:
func = "o_dict_test"
output += "%s(%d, %s, %s)" % (func, node.lineno, do_retrieve(node), dict_index)
return output
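# Illustrative sketch (hypothetical line number and index): the source expression 1 + x
# on line 5, where x was previously stored at context index 2, is rendered by do_expr()
# as:
#
#   o_op(5, ADD, o_int(5, 1), retrieve(&_ctx, 5, 2))
#
# i.e. operators become o_op() calls tagged with the source line, literals become
# o_int()/o_float()/o_string(), and identifier reads become retrieve() calls against
# the per-function context.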
def do_block(node):
output = ""
for c in node.sub:
if c.type == "func":
exists = Id.exists(c.value)
if exists:
print("line %d: %r already defined in line %d in this context" % (c.lineno, c.value, exists.lineno))
exit(1)
# make the function available to this scope
nparams = len(c.sub[0].sub)
Id.add(c.lineno, Id.FUNC, c.value, c.uvalue, nparams)
elif c.type == "store":
exists = Id.exists(c.value)
if exists and exists.type == Id.FUNC:
print("line %d: %r already defined as function in line %d" % (c.lineno, c.value, exists.lineno))
if not exists:
index = Id.add(c.lineno, Id.ID, c.value)
else:
index = Id.get(c.value).index
output += "store(&_ctx, %d, %d, %s);\n" % (c.lineno, index, do_expr(c.sub[0]))
elif c.type == "if":
output += do_if(c) + "\n"
elif c.type == "if-else":
output += do_if_else(c) + "\n"
elif c.type == "loop":
output += do_loop(c) + "\n"
elif c.type == "return":
output += "return o_return(&_ctx, %s);\n" % do_expr(c.sub[0])
# we need the context!
Id.no_func = True
elif c.type == "println":
params = ', '.join([do_expr(p) for p in c.sub[0].sub])
output += "println(%d, %s);\n" % (len(c.sub[0].sub), params)
elif c.type == "dict-set":
dict_index = do_dict_index(c)
output += "o_dict_set(%d, %s, %s, %s);\n" % (c.lineno, do_retrieve(c), dict_index, do_expr(c.sub[1]))
else:
output += do_expr(c) + "; "
return output
def generate(ast):
output = """\
/*
* jtc ver %s
* source: %s
*/
#include "rt.h"
""" % (__version__, ast.source)
if ast.sub[0].sub:
print("line %d: syntax error: main function parameters" % ast.sub[0].lineno)
exit(1)
for f in ast.funcs:
output += func_sign(f) + ";\n"
for f in ast.funcs:
output += do_func(f)
output += """
int _ep() { obj *o = _%s(); return o_lval(0, o); }
/* EOF */
""" % ast.uvalue
return output
if __name__ == "__main__":
ap = ArgumentParser(description="%s (Juan's Toy Compiler)" % app_name,
epilog=project_url,
)
ap.add_argument("source", help="source file to compile (use - for stdin)")
ap.add_argument("--lexer", action="store_true", help="dump lexer output and exit")
ap.add_argument("--parser", action="store_true", help="dump parser output and exit")
ap.add_argument("-c", action="store_true", help="dump C output and exit")
ap.add_argument("--debug", action="store_true", help="enable debug")
ap.add_argument("--no-gc", action="store_true", help="disable the garbage collector")
ap.add_argument("--verbose", action="store_true", help="enable verbose output")
ap.add_argument("--version", action="version", version="%(prog)s " + __version__)
args = ap.parse_args()
if args.verbose:
print("starting: %s ver %s" % (app_name, __version__))
if args.verbose:
print("reading source from:", args.source)
if args.source == "-":
source = "<stdin>"
data = stdin.read()
else:
source = args.source
try:
fd = open(args.source, "rt")
except IOError:
ap.error("failed to open %r" % args.source)
try:
data = fd.read()
except IOError as ex:
ap.error("failed to read %r: %s" % (args.source, ex))
finally:
fd.close()
if args.lexer:
l = lexer.Lexer()
l.build()
print(l.test(data))
exit(0)
ast = parser.parse(data, debug=args.debug)
if not ast:
exit(1)
ast.source = source
if args.parser:
print(ast)
exit(0)
if args.verbose:
print("generating code: %d function(s)" % len(ast.funcs))
c = generate(ast)
if args.c:
print(c)
exit(1)
cc = environ.get("CC", "gcc")
cflags = environ.get("CFLAGS", None)
home = environ.get("JTCHOME", path.abspath(path.dirname(__file__)))
fd = NamedTemporaryFile(mode="wt", suffix=".c", delete=False)
try:
fd.write(c)
fd.close()
cmd = [cc,]
if cflags:
cmd += cflags.split(" ")
cmd += ["-std=c99", "-Wall", "-I%s" % path.join(home, "include"), fd.name, "-o", source + ".out", "-lm"]
if not args.no_gc:
cmd.append("-lgc")
else:
cmd.append("-DDISABLE_GC")
if args.debug:
cmd.append("-ggdb")
else:
cmd.extend(["-s", "-O2"])
if args.verbose:
print("compiling:", ' '.join(cmd))
try:
p = Popen(cmd, stderr=PIPE, close_fds=True)
if p.wait() != 0:
errors = p.stderr.read()
if any(e in errors for e in (b"-lgc", b"gc.h:")):
print("*** gc not found, use --no-gc to disable the garbage collector")
print("%s output: %r" % (cc, errors))
exit(1)
except BaseException as ex:
print("error running the C compiler: %s" % ex)
exit(1)
finally:
unlink(fd.name)
if args.verbose:
print("done")
exit(0)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import time, _socket, poplib, imaplib, email, email.utils, datetime, chardet, re
from email_reply_parser import EmailReplyParser
from email.header import decode_header
import frappe
from frappe import _
from frappe.utils import (extract_email_id, convert_utc_to_user_timezone, now,
cint, cstr, strip, markdown)
from frappe.utils.scheduler import log
from frappe.utils.file_manager import get_random_filename, save_file, MaxFileSizeReachedError
class EmailSizeExceededError(frappe.ValidationError): pass
class EmailTimeoutError(frappe.ValidationError): pass
class TotalSizeExceededError(frappe.ValidationError): pass
class LoginLimitExceeded(frappe.ValidationError): pass
class EmailServer:
"""Wrapper for POP server to pull emails."""
def __init__(self, args=None):
self.setup(args)
def setup(self, args=None):
# override
self.settings = args or frappe._dict()
def check_mails(self):
# override
return True
def process_message(self, mail):
# override
pass
def connect(self):
"""Connect to **Email Account**."""
if cint(self.settings.use_imap):
return self.connect_imap()
else:
return self.connect_pop()
def connect_imap(self):
"""Connect to IMAP"""
try:
if cint(self.settings.use_ssl):
self.imap = Timed_IMAP4_SSL(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
else:
self.imap = Timed_IMAP4(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
self.imap.login(self.settings.username, self.settings.password)
# connection established!
return True
except _socket.error:
# Invalid mail server -- due to refusing connection
frappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))
raise
except Exception, e:
frappe.msgprint(_('Cannot connect: {0}').format(str(e)))
raise
def connect_pop(self):
# this method returns a POP connection
try:
if cint(self.settings.use_ssl):
self.pop = Timed_POP3_SSL(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
else:
self.pop = Timed_POP3(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
self.pop.user(self.settings.username)
self.pop.pass_(self.settings.password)
# connection established!
return True
except _socket.error:
# log performs rollback and logs error in scheduler log
log("receive.connect_pop")
# Invalid mail server -- due to refusing connection
frappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))
raise
except poplib.error_proto, e:
if self.is_temporary_system_problem(e):
return False
else:
frappe.msgprint(_('Invalid User Name or Support Password. Please rectify and try again.'))
raise
def get_messages(self):
"""Returns new email messages in a list."""
if not self.check_mails():
return # nothing to do
frappe.db.commit()
if not self.connect():
return []
try:
# track whether errors arose
self.errors = False
self.latest_messages = []
email_list = self.get_new_mails()
num = num_copy = len(email_list)
# WARNING: Hard coded max no. of messages to be popped
if num > 20: num = 20
# size limits
self.total_size = 0
self.max_email_size = cint(frappe.local.conf.get("max_email_size"))
self.max_total_size = 5 * self.max_email_size
for i, message_meta in enumerate(email_list):
# do not pull more than NUM emails
if (i+1) > num:
break
try:
self.retrieve_message(message_meta, i+1)
except (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):
break
# WARNING: Mark as read - message number 101 onwards from the pop list
# This is to avoid having too many messages entering the system
num = num_copy
if not cint(self.settings.use_imap):
if num > 100 and not self.errors:
for m in xrange(101, num+1):
self.pop.dele(m)
except Exception, e:
if self.has_login_limit_exceeded(e):
pass
else:
raise
finally:
# no matter the exception, pop should quit if connected
if cint(self.settings.use_imap):
self.imap.logout()
else:
self.pop.quit()
return self.latest_messages
def get_new_mails(self):
"""Return list of new mails"""
if cint(self.settings.use_imap):
self.imap.select("Inbox")
response, message = self.imap.uid('search', None, "UNSEEN")
email_list = message[0].split()
else:
email_list = self.pop.list()[1]
return email_list
def retrieve_message(self, message_meta, msg_num=None):
incoming_mail = None
try:
self.validate_message_limits(message_meta)
if cint(self.settings.use_imap):
status, message = self.imap.uid('fetch', message_meta, '(RFC822)')
self.latest_messages.append(message[0][1])
else:
msg = self.pop.retr(msg_num)
self.latest_messages.append(b'\n'.join(msg[1]))
except (TotalSizeExceededError, EmailTimeoutError):
# propagate this error to break the loop
self.errors = True
raise
except Exception, e:
if self.has_login_limit_exceeded(e):
self.errors = True
raise LoginLimitExceeded, e
else:
# log performs rollback and logs error in scheduler log
log("receive.get_messages", self.make_error_msg(msg_num, incoming_mail))
self.errors = True
frappe.db.rollback()
if not cint(self.settings.use_imap):
self.pop.dele(msg_num)
else:
# mark as seen
self.imap.uid('STORE', message_meta, '+FLAGS', '(\\SEEN)')
else:
if not cint(self.settings.use_imap):
self.pop.dele(msg_num)
else:
# mark as seen
self.imap.uid('STORE', message_meta, '+FLAGS', '(\\SEEN)')
def has_login_limit_exceeded(self, e):
return "-ERR Exceeded the login limit" in strip(cstr(e.message))
def is_temporary_system_problem(self, e):
messages = (
"-ERR [SYS/TEMP] Temporary system problem. Please try again later.",
"Connection timed out",
)
for message in messages:
if message in strip(cstr(e.message)) or message in strip(cstr(getattr(e, 'strerror', ''))):
return True
return False
def validate_message_limits(self, message_meta):
# throttle based on email size
if not self.max_email_size:
return
m, size = message_meta.split()
size = cint(size)
if size < self.max_email_size:
self.total_size += size
if self.total_size > self.max_total_size:
raise TotalSizeExceededError
else:
raise EmailSizeExceededError
def make_error_msg(self, msg_num, incoming_mail):
error_msg = "Error in retrieving email."
if not incoming_mail:
try:
# retrieve headers
incoming_mail = Email(b'\n'.join(self.pop.top(msg_num, 5)[1]))
except:
pass
if incoming_mail:
error_msg += "\nDate: {date}\nFrom: {from_email}\nSubject: {subject}\n".format(
date=incoming_mail.date, from_email=incoming_mail.from_email, subject=incoming_mail.subject)
return error_msg
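# Minimal illustrative subclass (hypothetical names and settings, not part of the shipped
# API) showing how the "override" hooks above are intended to be used:
#
# class MyEmailServer(EmailServer):
#     def setup(self, args=None):
#         self.settings = frappe._dict(host="imap.example.com", use_imap=1, use_ssl=1,
#             username="[email protected]", password="secret")
#     def check_mails(self):
#         return True
#
# MyEmailServer().get_messages() would then connect over IMAP and return the raw
# unseen messages as a list.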
class Email:
"""Wrapper for an email."""
def __init__(self, content):
"""Parses headers, content, attachments from given raw message.
:param content: Raw message."""
self.raw = content
self.mail = email.message_from_string(self.raw)
self.text_content = ''
self.html_content = ''
self.attachments = []
self.cid_map = {}
self.parse()
self.set_content_and_type()
self.set_subject()
self.set_from()
if self.mail["Date"]:
utc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail["Date"]))
utc_dt = datetime.datetime.utcfromtimestamp(utc)
self.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')
else:
self.date = now()
def parse(self):
"""Walk and process multi-part email."""
for part in self.mail.walk():
self.process_part(part)
def set_subject(self):
"""Parse and decode `Subject` header."""
_subject = decode_header(self.mail.get("Subject", "No Subject"))
self.subject = _subject[0][0] or ""
if _subject[0][1]:
self.subject = self.subject.decode(_subject[0][1])
else:
# assume that the encoding is utf-8
self.subject = self.subject.decode("utf-8")
if not self.subject:
self.subject = "No Subject"
def set_from(self):
# gmail mailing-list compatibility
# use X-Original-From if available, as gmail sometimes modifies the 'From'
_from_email = self.mail.get("X-Original-From") or self.mail["From"]
_from_email, encoding = decode_header(_from_email)[0]
if encoding:
_from_email = _from_email.decode(encoding)
else:
_from_email = _from_email.decode('utf-8')
self.from_email = extract_email_id(_from_email)
self.from_real_name = email.utils.parseaddr(_from_email)[0]
def set_content_and_type(self):
self.content, self.content_type = '[Blank Email]', 'text/plain'
if self.html_content:
self.content, self.content_type = self.html_content, 'text/html'
else:
self.content, self.content_type = EmailReplyParser.parse_reply(self.text_content), 'text/plain'
def process_part(self, part):
"""Parse email `part` and set it to `text_content`, `html_content` or `attachments`."""
content_type = part.get_content_type()
if content_type == 'text/plain':
self.text_content += self.get_payload(part)
elif content_type == 'text/html':
self.html_content += self.get_payload(part)
elif content_type == 'message/rfc822':
# sent by outlook when another email is sent as an attachment to this email
self.show_attached_email_headers_in_content(part)
elif part.get_filename():
self.get_attachment(part)
def show_attached_email_headers_in_content(self, part):
# get the multipart/alternative message
message = list(part.walk())[1]
headers = []
for key in ('From', 'To', 'Subject', 'Date'):
value = cstr(message.get(key))
if value:
headers.append('{label}: {value}'.format(label=_(key), value=value))
self.text_content += '\n'.join(headers)
self.html_content += '<hr>' + '\n'.join('<p>{0}</p>'.format(h) for h in headers)
if not message.is_multipart() and message.get_content_type()=='text/plain':
# email.parser didn't parse it!
text_content = self.get_payload(message)
self.text_content += text_content
self.html_content += markdown(text_content)
def get_charset(self, part):
"""Detect chartset."""
charset = part.get_content_charset()
if not charset:
charset = chardet.detect(str(part))['encoding']
return charset
def get_payload(self, part):
charset = self.get_charset(part)
try:
return unicode(part.get_payload(decode=True), str(charset), "ignore")
except LookupError:
return part.get_payload()
def get_attachment(self, part):
charset = self.get_charset(part)
fcontent = part.get_payload(decode=True)
if fcontent:
content_type = part.get_content_type()
fname = part.get_filename()
if fname:
try:
fname = cstr(decode_header(fname)[0][0])
except:
fname = get_random_filename(content_type=content_type)
else:
fname = get_random_filename(content_type=content_type)
self.attachments.append({
'content_type': content_type,
'fname': fname,
'fcontent': fcontent,
})
cid = (part.get("Content-Id") or "").strip("><")
if cid:
self.cid_map[fname] = cid
def save_attachments_in_doc(self, doc):
"""Save email attachments in given document."""
saved_attachments = []
for attachment in self.attachments:
try:
file_data = save_file(attachment['fname'], attachment['fcontent'],
doc.doctype, doc.name, is_private=1)
saved_attachments.append(file_data)
if attachment['fname'] in self.cid_map:
self.cid_map[file_data.name] = self.cid_map[attachment['fname']]
except MaxFileSizeReachedError:
# WARNING: bypass max file size exception
pass
except frappe.DuplicateEntryError:
# same file attached twice??
pass
return saved_attachments
def get_thread_id(self):
"""Extract thread ID from `[]`"""
l = re.findall('(?<=\[)[\w/-]+', self.subject)
return l and l[0] or None
# fix due to a python bug in poplib that limits it to 2048
poplib._MAXLINE = 20480
class TimerMixin(object):
def __init__(self, *args, **kwargs):
self.timeout = kwargs.pop('timeout', 0.0)
self.elapsed_time = 0.0
self._super.__init__(self, *args, **kwargs)
if self.timeout:
# set per operation timeout to one-fifth of total pop timeout
self.sock.settimeout(self.timeout / 5.0)
def _getline(self, *args, **kwargs):
start_time = time.time()
ret = self._super._getline(self, *args, **kwargs)
self.elapsed_time += time.time() - start_time
if self.timeout and self.elapsed_time > self.timeout:
raise EmailTimeoutError
return ret
def quit(self, *args, **kwargs):
self.elapsed_time = 0.0
return self._super.quit(self, *args, **kwargs)
class Timed_POP3(TimerMixin, poplib.POP3):
_super = poplib.POP3
class Timed_POP3_SSL(TimerMixin, poplib.POP3_SSL):
_super = poplib.POP3_SSL
class Timed_IMAP4(TimerMixin, imaplib.IMAP4):
_super = imaplib.IMAP4
class Timed_IMAP4_SSL(TimerMixin, imaplib.IMAP4_SSL):
_super = imaplib.IMAP4_SSL
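# Illustrative usage (hypothetical host): Timed_POP3("pop.example.com", timeout=30.0)
# pops the extra 'timeout' kwarg in TimerMixin, sets the per-operation socket timeout to
# 30.0 / 5 = 6 seconds, and raises EmailTimeoutError once the cumulative time spent in
# _getline() across the whole session exceeds 30 seconds.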
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from contextlib import contextmanager
import functools
import os
import pkgutil
import posixpath
import re
import io
import sys
import tempfile
import unittest
import six
from six.moves import urllib
from six.moves import cStringIO
import boto
import crcmod
import mock_storage_service # From boto/tests/integration/s3
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadException
from gslib.lazy_wrapper import LazyWrapper
import gslib.tests as gslib_tests
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.constants import UTF8
from gslib.utils.encryption_helper import Base64Sha256FromBase64EncryptionKey
from gslib.utils.posix_util import GetDefaultMode
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.unit_util import MakeHumanReadable
# pylint: disable=g-import-not-at-top, g-long-lambda
if not IS_WINDOWS:
import grp
import pwd
def GetInvalidGid():
# Get a list of all GIDs on the system for quick reference.
all_gid = sorted([group.gr_gid for group in grp.getgrall()])
# gid - Current GID being tested, 2k is close to a large empty span on most
# unix systems and a good starting point.
gid = 2000
# OverflowError should prevent the loop from reaching 5 billion, but the explicit
# upper bound ensures that an infinite loop cannot occur
while gid < 5000000000:
if gid in all_gid:
# Shortcut check; if gid is in list then the group exists.
gid += 1
continue
try:
# Testing for expected behaviour while testing POSIX permissions.
# For more on grp see:
# https://docs.python.org/3.7/library/grp.html
grp.getgrgid(gid)
gid += 1
except KeyError:
# This is the target exception for invalid GID and the behaviour needed
# for testing.
return gid
except OverflowError:
# Limit reached without a usable GID found.
break
raise Exception("Unable to generate GID for ")
def GetNonPrimaryGid():
# Select a group for the current user that is not the user's primary group.
# If the length of the user's groups is 1, then we must use the primary
# group. Otherwise put all of the user's groups (except the primary group)
# in a list, and use the first element. This guarantees us a group that is
# not the user's primary group (unless the user is only a member of one
# group).
primary_gid = GetPrimaryGid()
user_groups = GetUserGroups()
if len(user_groups) == 1:
return primary_gid
return [g for g in list(user_groups) if g != primary_gid][0]
def GetPrimaryGid():
return os.getgid()
def GetUserGroups():
return set([GetPrimaryGid()] +
[g.gr_gid for g in grp.getgrall() if USER_NAME() in g.gr_mem])
DEFAULT_MODE = int(GetDefaultMode(), 8)
USER_ID = os.getuid()
USER_NAME = LazyWrapper(lambda: pwd.getpwuid(USER_ID).pw_name)
# Take the largest UID on the system and increment it by one; this counts as
# an invalid UID, as the metric used is whether the UID matches the current
# user's exactly.
INVALID_UID = LazyWrapper(
lambda: sorted([user.pw_uid for user in pwd.getpwall()])[-1] + 1)
# Note that because the system's GID mapping can change mid-test, tests that
# check for specific errors should always re-fetch these GID-related values,
# rather than reusing these LazyWrapper values.
INVALID_GID = LazyWrapper(lambda: GetInvalidGid())
# Get a list of all groups on the system where the current username is listed
# as a member of the group in the gr_mem group attribute. Make this a list of
# all group IDs and cast as a set for more efficient lookup times.
USER_GROUPS = LazyWrapper(lambda: GetUserGroups())
# 256-bit base64 encryption keys used for testing AES256 customer-supplied
# encryption. These are public and open-source, so don't ever use them for
# real data.
TEST_ENCRYPTION_KEY1 = b'iMSM9eeXliDZHSBJZO71R98tfeW/+87VXTpk5chGd6Y='
TEST_ENCRYPTION_KEY1_SHA256_B64 = Base64Sha256FromBase64EncryptionKey(
TEST_ENCRYPTION_KEY1)
TEST_ENCRYPTION_KEY2 = b'4TSaQ3S4U+5oxAbByA7HgIigD51zfzGed/c03Ts2TXc='
TEST_ENCRYPTION_KEY2_SHA256_B64 = Base64Sha256FromBase64EncryptionKey(
TEST_ENCRYPTION_KEY2)
TEST_ENCRYPTION_KEY3 = b'HO4Q2X28N/6SmuAJ1v1CTuJjf5emQcXf7YriKzT1gj0='
TEST_ENCRYPTION_KEY3_SHA256_B64 = Base64Sha256FromBase64EncryptionKey(
TEST_ENCRYPTION_KEY3)
TEST_ENCRYPTION_KEY4 = b'U6zIErjZCK/IpIeDS0pJrDayqlZurY8M9dvPJU0SXI8='
TEST_ENCRYPTION_KEY4_SHA256_B64 = Base64Sha256FromBase64EncryptionKey(
TEST_ENCRYPTION_KEY4)
TEST_ENCRYPTION_CONTENT1 = b'bar'
TEST_ENCRYPTION_CONTENT1_MD5 = 'N7UdGUp1E+RbVvZSTy1R8g=='
TEST_ENCRYPTION_CONTENT1_CRC32C = 'CrcTMQ=='
TEST_ENCRYPTION_CONTENT2 = b'bar2'
TEST_ENCRYPTION_CONTENT2_MD5 = 'Ik4lOfUiA+szcorNIotEMg=='
TEST_ENCRYPTION_CONTENT2_CRC32C = 'QScXtg=='
TEST_ENCRYPTION_CONTENT3 = b'bar3'
TEST_ENCRYPTION_CONTENT3_MD5 = '9iW6smjfu9hm0A//VQTQfw=='
TEST_ENCRYPTION_CONTENT3_CRC32C = 's0yUtQ=='
TEST_ENCRYPTION_CONTENT4 = b'bar4'
TEST_ENCRYPTION_CONTENT4_MD5 = 'kPCx6uZiUOU7W6E+cDCZFg=='
TEST_ENCRYPTION_CONTENT4_CRC32C = 'Z4bwXg=='
TEST_ENCRYPTION_CONTENT5 = b'bar5'
TEST_ENCRYPTION_CONTENT5_MD5 = '758XbXQOVkp8fTKMm83NXA=='
TEST_ENCRYPTION_CONTENT5_CRC32C = 'le1zXQ=='
# Flags for running different types of tests.
RUN_INTEGRATION_TESTS = True
RUN_UNIT_TESTS = True
RUN_S3_TESTS = False
USE_MULTIREGIONAL_BUCKETS = False
PARALLEL_COMPOSITE_UPLOAD_TEST_CONFIG = '/tmp/.boto.parallel_upload_test_config'
ORPHANED_FILE = ('This sync will orphan file(s), please fix their permissions '
'before trying again.')
POSIX_MODE_ERROR = 'Mode for %s won\'t allow read access.'
POSIX_GID_ERROR = 'GID for %s doesn\'t exist on current system.'
POSIX_UID_ERROR = 'UID for %s doesn\'t exist on current system.'
POSIX_INSUFFICIENT_ACCESS_ERROR = 'Insufficient access with uid/gid/mode for %s'
def BuildErrorRegex(obj, err_str):
"""Builds a regex to match a file name for a file that would be orphaned.
Args:
obj: Object uri.
err_str: The error string to search for.
Returns:
A regex that matches the error text with the file's URI filled in, i.e. the
message that would be printed for a file that would be orphaned.
"""
return re.compile(err_str % ObjectToURI(obj))
def TailSet(start_point, listing):
"""Returns set of object name tails.
Tails can be compared between source and dest, past the point at which the
command was done. For example, if the test ran {cp,mv,rsync}
gs://bucket1/dir gs://bucket2/dir2, the tails for listings from bucket1
would start after "dir", while the tails for listings from bucket2 would
start after "dir2".
Args:
start_point: The target of the cp command, e.g., for the above command it
would be gs://bucket1/dir for the bucket1 listing results and
gs://bucket2/dir2 for the bucket2 listing results.
listing: The listing over which to compute tail.
Returns:
Object name tails.
"""
return set(l[len(start_point):] for l in listing.strip().split('\n'))
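# The helper below is an added usage sketch, not part of the original module;
# the bucket names and object paths are hypothetical.
def _ExampleTailSetUsage():
  """Shows how TailSet compares two listings past their differing prefixes."""
  src_listing = 'gs://bucket1/dir/a\ngs://bucket1/dir/sub/b'
  dst_listing = 'gs://bucket2/dir2/a\ngs://bucket2/dir2/sub/b'
  # Both tail sets are {'/a', '/sub/b'}, so this returns True.
  return (TailSet('gs://bucket1/dir', src_listing) ==
          TailSet('gs://bucket2/dir2', dst_listing))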
HAS_S3_CREDS = (boto.config.get('Credentials', 'aws_access_key_id', None) and
boto.config.get('Credentials', 'aws_secret_access_key', None))
_GS_HOST = boto.config.get('Credentials', 'gs_host', None)
_DEFAULT_HOST = six.ensure_str(boto.gs.connection.GSConnection.DefaultHost)
if _GS_HOST is not None:
HAS_NON_DEFAULT_GS_HOST = _DEFAULT_HOST != six.ensure_str(_GS_HOST)
else:
HAS_NON_DEFAULT_GS_HOST = False
HAS_GS_HOST = _GS_HOST is not None
HAS_GS_PORT = boto.config.get('Credentials', 'gs_port', None) is not None
USING_JSON_API = boto.config.get('GSUtil', 'prefer_api',
'json').upper() != 'XML'
def _ArgcompleteAvailable():
argcomplete = None
if not IS_WINDOWS:
try:
# pylint: disable=g-import-not-at-top
import argcomplete
except ImportError:
pass
return argcomplete is not None
ARGCOMPLETE_AVAILABLE = _ArgcompleteAvailable()
def _NormalizeURI(uri):
"""Normalizes the path component of a URI.
Args:
uri: URI to normalize.
Returns:
Normalized URI.
Examples:
gs://foo//bar -> gs://foo/bar
gs://foo/./bar -> gs://foo/bar
"""
# Note: we have to do this dance of changing gs:// to file:// because on
# Windows, the urlparse function won't work with URL schemes that are not
# known. urlparse('gs://foo/bar') on Windows turns into:
# scheme='gs', netloc='', path='//foo/bar'
# while on non-Windows platforms, it turns into:
# scheme='gs', netloc='foo', path='/bar'
uri = uri.replace('gs://', 'file://')
parsed = list(urllib.parse.urlparse(uri))
parsed[2] = posixpath.normpath(parsed[2])
if parsed[2].startswith('//'):
# The normpath function doesn't change '//foo' -> '/foo' by design.
parsed[2] = parsed[2][1:]
unparsed = urllib.parse.urlunparse(parsed)
unparsed = unparsed.replace('file://', 'gs://')
return unparsed
def GenerationFromURI(uri):
"""Returns a the generation for a StorageUri.
Args:
uri: boto.storage_uri.StorageURI object to get the URI from.
Returns:
Generation string for the URI.
"""
if not (uri.generation or uri.version_id):
if uri.scheme == 's3': return 'null'
return uri.generation or uri.version_id
def ObjectToURI(obj, *suffixes):
"""Returns the storage URI string for a given StorageUri or file object.
Args:
obj: The object to get the URI from. Can be a file object, a subclass of
boto.storage_uri.StorageURI, or a string. If a string, it is assumed to
be a local on-disk path.
*suffixes: Suffixes to append. For example, ObjectToURI(bucketuri, 'foo')
would return the URI for a key name 'foo' inside the given
bucket.
Returns:
Storage URI string.
"""
if is_file(obj):
return 'file://{}'.format(os.path.abspath(os.path.join(obj.name,
*suffixes)))
if isinstance(obj, six.string_types):
return 'file://{}'.format(os.path.join(obj, *suffixes))
uri = six.ensure_text(obj.uri)
if suffixes:
suffixes_list = [six.ensure_text(suffix) for suffix in suffixes]
uri = _NormalizeURI('/'.join([uri] + suffixes_list))
# Storage URIs shouldn't contain a trailing slash.
if uri.endswith('/'):
uri = uri[:-1]
return uri
class GSMockConnection(mock_storage_service.MockConnection):
def __init__(self, *args, **kwargs):
kwargs['provider'] = 'gs'
self.debug = 0
super(GSMockConnection, self).__init__(*args, **kwargs)
mock_connection = GSMockConnection()
class GSMockBucketStorageUri(mock_storage_service.MockBucketStorageUri):
def connect(self, access_key_id=None, secret_access_key=None):
return mock_connection
def compose(self, components, headers=None):
"""Dummy implementation to allow parallel uploads with tests."""
return self.new_key()
def get_location(self, headers=None):
return 'US'
def get_cors(self, headers=None):
return boto.gs.cors.Cors()
def get_encryption_config(self, headers=None):
return boto.gs.encryptionconfig.EncryptionConfig()
def get_lifecycle_config(self, headers=None):
return None
def get_website_config(self, headers=None):
return None
def get_versioning_config(self, headers=None):
return None
TEST_BOTO_REMOVE_SECTION = 'TestRemoveSection'
def _SetBotoConfig(section, name, value, revert_list):
"""Sets boto configuration temporarily for testing.
SetBotoConfigForTest should be called by tests instead of this function.
This will ensure that the configuration is reverted to its original setting
using _RevertBotoConfig.
Args:
section: Boto config section to set
name: Boto config name to set
value: Value to set
revert_list: List for tracking configs to revert.
"""
prev_value = boto.config.get(section, name, None)
if not boto.config.has_section(section):
revert_list.append((section, TEST_BOTO_REMOVE_SECTION, None))
boto.config.add_section(section)
revert_list.append((section, name, prev_value))
if value is None:
boto.config.remove_option(section, name)
else:
boto.config.set(section, name, value)
def _RevertBotoConfig(revert_list):
"""Reverts boto config modifications made by _SetBotoConfig.
Args:
revert_list: List of boto config modifications created by calls to
_SetBotoConfig.
"""
sections_to_remove = []
for section, name, value in revert_list:
if value is None:
if name == TEST_BOTO_REMOVE_SECTION:
sections_to_remove.append(section)
else:
boto.config.remove_option(section, name)
else:
boto.config.set(section, name, value)
for section in sections_to_remove:
boto.config.remove_section(section)
def SequentialAndParallelTransfer(func):
"""Decorator for tests that perform file to object transfers, or vice versa.
This forces the test to run once normally, and again with special boto
config settings that will ensure that the test follows the parallel composite
upload and/or sliced object download code paths.
Args:
func: Function to wrap.
Returns:
Wrapped function.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
# Run the test normally once.
func(*args, **kwargs)
if not RUN_S3_TESTS and UsingCrcmodExtension():
# Try again, forcing parallel upload and sliced download.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'sliced_object_download_threshold', '1'),
('GSUtil', 'sliced_object_download_max_components', '3'),
('GSUtil', 'check_hashes', 'always')
]):
func(*args, **kwargs)
return Wrapper
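# Added usage sketch (not part of the original module; the test class and
# helpers shown are assumed to come from gsutil's integration test framework):
#
#   class TestCp(testcase.GsUtilIntegrationTestCase):
#
#     @SequentialAndParallelTransfer
#     def test_copy_small_file(self):
#       # Copy a local temp file to a test bucket. The body runs once normally
#       # and once with parallel composite upload / sliced download forced on.
#       pass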
def _SectionDictFromConfigList(boto_config_list):
"""Converts the input config list to a dict that is easy to write to a file.
This is used to reset the boto config contents for a test instead of
preserving the existing values.
Args:
boto_config_list: list of tuples of:
(boto config section to set, boto config name to set, value to set)
If value to set is None, no entry is created.
Returns:
Dictionary of {section: {keys: values}} for writing to the file.
"""
sections = {}
for config_entry in boto_config_list:
section, key, value = (config_entry[0], config_entry[1], config_entry[2])
if section not in sections:
sections[section] = {}
if value is not None:
sections[section][key] = value
return sections
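# The helper below is an added worked example, not part of the original module.
def _ExampleSectionDictFromConfigList():
  """Shows the dict shape produced from a config list; None values are dropped."""
  config_list = [('GSUtil', 'check_hashes', 'never'),
                 ('Boto', 'https_validate_certificates', None)]
  # Returns {'GSUtil': {'check_hashes': 'never'}, 'Boto': {}}.
  return _SectionDictFromConfigList(config_list)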
def _WriteSectionDictToFile(section_dict, tmp_filename):
"""Writes a section dict from _SectionDictFromConfigList to tmp_filename."""
with open(tmp_filename, 'w') as tmp_file:
for section, key_value_pairs in six.iteritems(section_dict):
tmp_file.write('[%s]\n' % section)
for key, value in six.iteritems(key_value_pairs):
tmp_file.write('%s = %s\n' % (key, value))
@contextmanager
def SetDummyProjectForUnitTest():
"""Sets a dummy project in boto config for the duration of a 'with' clause."""
# Listing buckets requires a project ID, but unit tests should run
# regardless of whether one is specified in config.
with SetBotoConfigForTest([('GSUtil', 'default_project_id', 'dummy_proj')]):
yield
@contextmanager
def SetBotoConfigForTest(boto_config_list, use_existing_config=True):
"""Sets the input list of boto configs for the duration of a 'with' clause.
This preserves any existing boto configuration unless it is overwritten in
the provided boto_config_list.
Args:
boto_config_list: list of tuples of:
(boto config section to set, boto config name to set, value to set)
use_existing_config: If True, apply boto_config_list to the existing
configuration, preserving any original values unless they are
overwritten. Otherwise, apply boto_config_list to a blank configuration.
Yields:
Once after config is set.
"""
revert_configs = []
tmp_filename = None
try:
tmp_fd, tmp_filename = tempfile.mkstemp(prefix='gsutil-temp-cfg')
os.close(tmp_fd)
if use_existing_config:
for boto_config in boto_config_list:
boto_value = boto_config[2]
if six.PY3:
if isinstance(boto_value, bytes):
boto_value = boto_value.decode(UTF8)
_SetBotoConfig(boto_config[0], boto_config[1], boto_value,
revert_configs)
with open(tmp_filename, 'w') as tmp_file:
boto.config.write(tmp_file)
else:
_WriteSectionDictToFile(_SectionDictFromConfigList(boto_config_list),
tmp_filename)
with _SetBotoConfigFileForTest(tmp_filename):
yield
finally:
_RevertBotoConfig(revert_configs)
if tmp_filename:
try:
os.remove(tmp_filename)
except OSError:
pass
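# Added usage sketch (not part of the original module): temporarily override a
# couple of boto settings around the code under test; the option values shown
# are illustrative only.
#
#   with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never'),
#                              ('Boto', 'num_retries', '0')]):
#     pass  # code here sees the temporary config; it is reverted on exit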
@contextmanager
def SetEnvironmentForTest(env_variable_dict):
"""Sets OS environment variables for a single test."""
def _ApplyDictToEnvironment(dict_to_apply):
for k, v in six.iteritems(dict_to_apply):
old_values[k] = os.environ.get(k)
if v is not None:
os.environ[k] = v
elif k in os.environ:
del os.environ[k]
old_values = {}
for k in env_variable_dict:
old_values[k] = os.environ.get(k)
try:
_ApplyDictToEnvironment(env_variable_dict)
yield
finally:
_ApplyDictToEnvironment(old_values)
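# Added usage sketch (not part of the original module): keys mapped to None are
# removed from os.environ inside the block and everything is restored on exit;
# the variable names are placeholders.
#
#   with SetEnvironmentForTest({'MY_TEST_FLAG': '1', 'BOTO_CONFIG': None}):
#     pass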
@contextmanager
def _SetBotoConfigFileForTest(boto_config_path):
"""Sets a given file as the boto config file for a single test.
This function applies only the configuration in boto_config_path and will
ignore existing configuration. It should not be called directly by tests;
instead, use SetBotoConfigForTest.
Args:
boto_config_path: Path to config file to use.
Yields:
When configuration has been applied, and again when reverted.
"""
# Setup for entering "with" block.
try:
old_boto_config_env_variable = os.environ['BOTO_CONFIG']
boto_config_was_set = True
except KeyError:
boto_config_was_set = False
os.environ['BOTO_CONFIG'] = boto_config_path
try:
yield
finally:
# Teardown for exiting "with" block.
if boto_config_was_set:
os.environ['BOTO_CONFIG'] = old_boto_config_env_variable
else:
os.environ.pop('BOTO_CONFIG', None)
def GetTestNames():
"""Returns a list of the names of the test modules in gslib.tests."""
matcher = re.compile(r'^test_(?P<name>.*)$')
names = []
for _, modname, _ in pkgutil.iter_modules(gslib_tests.__path__):
m = matcher.match(modname)
if m:
names.append(m.group('name'))
return names
def is_file(obj):
if six.PY2:
return isinstance(obj, file) # pylint: disable=undefined-variable
return isinstance(obj, io.IOBase)
def MakeBucketNameValid(name):
"""Returns a copy of the given name with any invalid characters replaced.
Args:
name (Union[str, unicode, bytes]): The bucket name to transform into a valid name.
Returns:
Union[str, unicode, bytes]: The version of the bucket name containing only
valid characters.
"""
# Neither underscores nor uppercase letters are valid characters for a
# bucket name. Replace those with hyphens and lowercase characters.
if isinstance(name, (six.text_type, six.binary_type)):
return name.replace('_', '-').lower()
else:
raise TypeError('Unable to format name. Incorrect Type: {0}'.format(
type(name)))
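# Added worked example (not part of the original module): underscores become
# hyphens and uppercase letters are lowered, so
#   MakeBucketNameValid('My_Test_Bucket') == 'my-test-bucket'
# while a non-string input such as MakeBucketNameValid(123) raises TypeError.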
@contextmanager
def WorkingDirectory(new_working_directory):
"""Changes the working directory for the duration of a 'with' call.
Args:
new_working_directory: The directory to switch to before executing wrapped
code. A None value indicates that no switching is necessary.
Yields:
Once after working directory has been changed.
"""
prev_working_directory = None
try:
prev_working_directory = os.getcwd()
except OSError:
# This can happen if the current working directory no longer exists.
pass
if new_working_directory:
os.chdir(new_working_directory)
try:
yield
finally:
if new_working_directory and prev_working_directory:
os.chdir(prev_working_directory)
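# Added usage sketch (not part of the original module): run a block from a
# different directory and return to the previous one afterwards.
#
#   with WorkingDirectory(tempfile.gettempdir()):
#     pass  # os.getcwd() points at the temp dir inside the block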
def InvokedFromParFile():
loader = globals().get('__loader__', None)
if not loader:
return False
return 'zipimport' in loader.__class__.__module__
def SkipForParFile(reason):
if InvokedFromParFile():
return unittest.skip(reason)
else:
return lambda func: func
# Custom test callbacks must be pickleable, and therefore at global scope.
class HaltingCopyCallbackHandler(object):
"""Test callback handler for intentionally stopping a resumable transfer."""
def __init__(self, is_upload, halt_at_byte):
self._is_upload = is_upload
self._halt_at_byte = halt_at_byte
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if total_bytes_transferred >= self._halt_at_byte:
sys.stderr.write(
'Halting transfer after byte %s. %s/%s transferred.\r\n' %
(self._halt_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
if self._is_upload:
raise ResumableUploadException('Artificially halting upload.')
else:
raise ResumableDownloadException('Artificially halting download.')
class HaltOneComponentCopyCallbackHandler(object):
"""Test callback handler for stopping part of a sliced download."""
def __init__(self, halt_at_byte):
self._last_progress_byte = None
self._halt_at_byte = halt_at_byte
# pylint: disable=invalid-name
# pylint: disable=unused-argument
def call(self, current_progress_byte, total_size_unused):
"""Forcibly exits if the passed the halting point since the last call."""
if (self._last_progress_byte is not None and
self._last_progress_byte < self._halt_at_byte < current_progress_byte):
sys.stderr.write('Halting transfer.\r\n')
raise ResumableDownloadException('Artificially halting download.')
self._last_progress_byte = current_progress_byte
class TestParams(object):
"""Allows easier organization of test parameters.
This class allows grouping of test parameters, which include args and kwargs
to be used, as well as the expected result based on those arguments.
For example, to test an Add function, one might do:
params = TestParams(args=(1, 2, 3), expected=6)
self.assertEqual(Add(*(params.args)), params.expected)
"""
def __init__(self, args=None, kwargs=None, expected=None):
self.args = tuple() if args is None else args
self.kwargs = dict() if kwargs is None else kwargs
self.expected = expected
if not isinstance(self.args, (tuple, list)):
raise TypeError('TestParam args must be a tuple or list.')
if not isinstance(self.kwargs, dict):
raise TypeError('TestParam kwargs must be a dict.')
class CaptureStdout(list):
"""Context manager.
For example, this function has the lines printed by the function call
stored as a list in output:
with CaptureStdout() as output:
function(input_to_function)
"""
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = cStringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio
sys.stdout = self._stdout
|
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bits and pieces used by the driver that don't really fit elsewhere."""
import collections
import struct
from pymongo.message import _Query
import bson
import pymongo
from bson.codec_options import CodecOptions
from bson.py3compat import itervalues, string_type, iteritems, u
from bson.son import SON
from pymongo.errors import (CursorNotFound,
DuplicateKeyError,
ExecutionTimeout,
NotMasterError,
OperationFailure,
WriteError,
WriteConcernError,
WTimeoutError)
_UUNDER = u("_")
def _gen_index_name(keys):
"""Generate an index name from the set of fields it is over."""
return _UUNDER.join(["%s_%s" % item for item in keys])
def _index_list(key_or_list, direction=None):
"""Helper to generate a list of (key, direction) pairs.
Takes such a list, or a single key, or a single key and direction.
"""
if direction is not None:
return [(key_or_list, direction)]
else:
if isinstance(key_or_list, string_type):
return [(key_or_list, pymongo.ASCENDING)]
elif not isinstance(key_or_list, (list, tuple)):
raise TypeError("if no direction is specified, "
"key_or_list must be an instance of list")
return key_or_list
def _index_document(index_list):
"""Helper to generate an index specifying document.
Takes a list of (key, direction) pairs.
"""
if isinstance(index_list, collections.Mapping):
raise TypeError("passing a dict to sort/create_index/hint is not "
"allowed - use a list of tuples instead. did you "
"mean %r?" % list(iteritems(index_list)))
elif not isinstance(index_list, (list, tuple)):
raise TypeError("must use a list of (key, direction) pairs, "
"not: " + repr(index_list))
if not len(index_list):
raise ValueError("key_or_list must not be the empty list")
index = SON()
for (key, value) in index_list:
if not isinstance(key, string_type):
raise TypeError("first item in each key pair must be a string")
if not isinstance(value, (string_type, int, collections.Mapping)):
raise TypeError("second item in each key pair must be 1, -1, "
"'2d', 'geoHaystack', or another valid MongoDB "
"index specifier.")
index[key] = value
return index
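# The helper below is an added illustration, not part of the driver; it shows
# how the two helpers above are typically chained when building an index spec.
def _example_index_helpers():
    keys = _index_list("user_id")   # a bare key defaults to [("user_id", 1)]
    return _index_document(keys)    # ordered spec: SON([("user_id", 1)])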
def _unpack_response(response, cursor_id=None, codec_options=CodecOptions()):
"""Unpack a response from the database.
Check the response for errors and unpack, returning a dictionary
containing the response data.
Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or
OperationFailure.
:Parameters:
- `response`: byte string as returned from the database
- `cursor_id` (optional): cursor_id we sent to get this response -
used for raising an informative exception when we get cursor id not
valid at server response
- `codec_options` (optional): an instance of
:class:`~bson.codec_options.CodecOptions`
"""
response_flag = struct.unpack("<i", response[:4])[0]
if response_flag & 1:
# Shouldn't get this response if we aren't doing a getMore
assert cursor_id is not None
# Fake a getMore command response. OP_GET_MORE provides no document.
msg = "Cursor not found, cursor id: %d" % (cursor_id,)
errobj = {"ok" : 0, "errmsg" : msg, "code" : 43}
raise CursorNotFound(msg, 43, errobj)
elif response_flag & 2:
error_object = bson.BSON(response[20:]).decode()
# Fake the ok field if it doesn't exist.
error_object.setdefault("ok", 0)
if error_object["$err"].startswith("not master"):
raise NotMasterError(error_object["$err"], error_object)
elif error_object.get("code") == 50:
raise ExecutionTimeout(error_object.get("$err"),
error_object.get("code"),
error_object)
raise OperationFailure("database error: %s" %
error_object.get("$err"),
error_object.get("code"),
error_object)
result = {}
result["cursor_id"] = struct.unpack("<q", response[4:12])[0]
result["starting_from"] = struct.unpack("<i", response[12:16])[0]
result["number_returned"] = struct.unpack("<i", response[16:20])[0]
result["data"] = bson.decode_all(response[20:], codec_options)
assert len(result["data"]) == result["number_returned"]
return result
def _check_command_response(response, msg=None, allowable_errors=None):
"""Check the response to a command for errors.
"""
if "ok" not in response:
# Server didn't recognize our message as a command.
raise OperationFailure(response.get("$err"),
response.get("code"),
response)
# TODO: remove, this is moving to _check_gle_response
if response.get("wtimeout", False):
# MongoDB versions before 1.8.0 return the error message in an "errmsg"
# field. If "errmsg" exists "err" will also exist set to None, so we
# have to check for "errmsg" first.
raise WTimeoutError(response.get("errmsg", response.get("err")),
response.get("code"),
response)
if not response["ok"]:
details = response
# Mongos returns the error details in a 'raw' object
# for some errors.
if "raw" in response:
for shard in itervalues(response["raw"]):
# Grab the first non-empty raw error from a shard.
if shard.get("errmsg") and not shard.get("ok"):
details = shard
break
errmsg = details["errmsg"]
if allowable_errors is None or errmsg not in allowable_errors:
# Server is "not master" or "recovering"
if (errmsg.startswith("not master")
or errmsg.startswith("node is recovering")):
raise NotMasterError(errmsg, response)
# Server assertion failures
if errmsg == "db assertion failure":
errmsg = ("db assertion failure, assertion: '%s'" %
details.get("assertion", ""))
raise OperationFailure(errmsg,
details.get("assertionCode"),
response)
# Other errors
code = details.get("code")
# findAndModify with upsert can raise duplicate key error
if code in (11000, 11001, 12582):
raise DuplicateKeyError(errmsg, code, response)
elif code == 50:
raise ExecutionTimeout(errmsg, code, response)
msg = msg or "%s"
raise OperationFailure(msg % errmsg, code, response)
def _check_gle_response(response):
"""Return getlasterror response as a dict, or raise OperationFailure."""
response = _unpack_response(response)
assert response["number_returned"] == 1
result = response["data"][0]
# Did getlasterror itself fail?
_check_command_response(result)
if result.get("wtimeout", False):
# MongoDB versions before 1.8.0 return the error message in an "errmsg"
# field. If "errmsg" exists "err" will also exist set to None, so we
# have to check for "errmsg" first.
raise WTimeoutError(result.get("errmsg", result.get("err")),
result.get("code"),
result)
error_msg = result.get("err", "")
if error_msg is None:
return result
if error_msg.startswith("not master"):
raise NotMasterError(error_msg, result)
details = result
# mongos returns the error code in an error object for some errors.
if "errObjects" in result:
for errobj in result["errObjects"]:
if errobj.get("err") == error_msg:
details = errobj
break
code = details.get("code")
if code in (11000, 11001, 12582):
raise DuplicateKeyError(details["err"], code, result)
raise OperationFailure(details["err"], code, result)
def _first_batch(sock_info, namespace, query,
ntoreturn, slave_ok, codec_options, read_preference):
"""Simple query helper for retrieving a first (and possibly only) batch."""
query = _Query(
0, namespace, 0, ntoreturn, query, None,
codec_options, read_preference, 0, ntoreturn)
request_id, msg, max_doc_size = query.get_message(slave_ok,
sock_info.is_mongos)
sock_info.send_message(msg, max_doc_size)
response = sock_info.receive_message(1, request_id)
return _unpack_response(response, None, codec_options)
def _check_write_command_response(results):
"""Backward compatibility helper for write command error handling.
"""
errors = [res for res in results
if "writeErrors" in res[1] or "writeConcernError" in res[1]]
if errors:
# If multiple batches had errors
# raise from the last batch.
offset, result = errors[-1]
# Prefer write errors over write concern errors
write_errors = result.get("writeErrors")
if write_errors:
# If the last batch had multiple errors only report
# the last error to emulate continue_on_error.
error = write_errors[-1]
error["index"] += offset
if error.get("code") == 11000:
raise DuplicateKeyError(error.get("errmsg"), 11000, error)
raise WriteError(error.get("errmsg"), error.get("code"), error)
else:
error = result["writeConcernError"]
if "errInfo" in error and error["errInfo"].get('wtimeout'):
# Make sure we raise WTimeoutError
raise WTimeoutError(
error.get("errmsg"), error.get("code"), error)
raise WriteConcernError(
error.get("errmsg"), error.get("code"), error)
def _upconvert_write_result(operation, command, result):
"""Convert a legacy write result to write commmand format."""
# Based on _merge_legacy from bulk.py
affected = result.get("n", 0)
res = {"ok": 1, "n": affected}
errmsg = result.get("errmsg", result.get("err", ""))
if errmsg:
# The write was successful on at least the primary so don't return.
if result.get("wtimeout"):
res["writeConcernError"] = {"errmsg": errmsg,
"code": 64,
"errInfo": {"wtimeout": True}}
else:
# The write failed.
error = {"index": 0,
"code": result.get("code", 8),
"errmsg": errmsg}
if "errInfo" in result:
error["errInfo"] = result["errInfo"]
res["writeErrors"] = [error]
return res
if operation == "insert":
# GLE result for insert is always 0 in most MongoDB versions.
res["n"] = 1
elif operation == "update":
res["nModified"] = 0
if "upserted" in result:
res["upserted"] = [{"index": 0, "_id": result["upserted"]}]
# Versions of MongoDB before 2.6 don't return the _id for an
# upsert if _id is not an ObjectId.
elif result.get("updatedExisting") is False and affected == 1:
# If _id is in both the update document *and* the query spec
# the update document _id takes precedence.
_id = command["u"].get("_id", command["q"].get("_id"))
res["upserted"] = [{"index": 0, "_id": _id}]
else:
res["nModified"] = affected
return res
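# Added worked example (values are hypothetical): a legacy getlasterror reply
# for an update that performed an upsert,
#     command = {"q": {"_id": 1}, "u": {"$set": {"x": 2}}}
#     result = {"ok": 1, "n": 1, "upserted": 1}
# is upconverted to
#     {"ok": 1, "n": 1, "nModified": 0, "upserted": [{"index": 0, "_id": 1}]}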
def _fields_list_to_dict(fields, option_name):
"""Takes a sequence of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
if isinstance(fields, collections.Mapping):
return fields
if isinstance(fields, collections.Sequence):
if not all(isinstance(field, string_type) for field in fields):
raise TypeError("%s must be a list of key names, each an "
"instance of %s" % (option_name,
string_type.__name__))
return dict.fromkeys(fields, 1)
raise TypeError("%s must be a mapping or "
"list of key names" % (option_name,))
|
|
#encoding: utf-8
from django.utils.translation import ugettext_lazy
from django.utils.translation import ugettext as _
from django.utils import simplejson
from django.views.generic.list_detail import object_list, object_detail
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.db.models import Count, Q
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader, RequestContext
from django.core.urlresolvers import reverse
from tagging.models import Tag, TaggedItem
from tagging.views import tagged_object_list
from tagging.utils import get_tag
import tagging
from actstream import action
from knesset.utils import limit_by_request
from knesset.laws.models import *
from knesset.mks.models import Member
from knesset.tagvotes.models import TagVote
from knesset.hashnav.views import ListDetailView
from knesset.hashnav import DetailView, ListView, method_decorator
from knesset.agendas.models import Agenda
import urllib
import urllib2
import difflib
import logging
import datetime
logger = logging.getLogger("open-knesset.laws.views")
def bill_tags_cloud(request, min_posts_count=1):
member = None
if 'member' in request.GET:
member = get_object_or_404(Member, pk=request.GET['member'])
tags_cloud = Tag.objects.usage_for_queryset(member.bills.all(),counts=True)
tags_cloud = tagging.utils.calculate_cloud(tags_cloud)
title = _('Bills by %(member)s by tag') % {'member':member.name}
else:
title = _('Bills by tag')
tags_cloud = Tag.objects.cloud_for_model(Bill)
return render_to_response("laws/bill_tags_cloud.html",
{"tags_cloud": tags_cloud, "title":title, "member":member}, context_instance=RequestContext(request))
def bill_tag(request, tag):
tag_instance = get_tag(tag)
if tag_instance is None:
raise Http404(_('No Tag found matching "%s".') % tag)
extra_context = {'tag':tag_instance}
extra_context['tag_url'] = reverse('bill-tag',args=[tag_instance])
if 'member' in request.GET:
try:
member_id = int(request.GET['member'])
except ValueError:
raise Http404(_('No Member found matching "%s".') % request.GET['member'])
extra_context['member'] = get_object_or_404(Member, pk=request.GET['member'])
extra_context['member_url'] = reverse('member-detail',args=[extra_context['member'].id])
extra_context['title'] = _('Bills tagged %(tag)s by %(member)s') % {'tag': tag, 'member':extra_context['member'].name}
qs = extra_context['member'].bills.all()
else: # only tag is given
extra_context['title'] = _('Bills tagged %(tag)s') % {'tag': tag}
qs = Bill
queryset = TaggedItem.objects.get_by_model(qs, tag_instance)
bill_proposers = [b.proposers.all() for b in TaggedItem.objects.get_by_model(Bill, tag_instance)]
d = {}
for bill in bill_proposers:
for p in bill:
d[p] = d.get(p,0)+1
# now d is a dict: MK -> number of proposals in this tag
mks = d.keys()
for mk in mks:
mk.count = d[mk]
mks = tagging.utils.calculate_cloud(mks)
extra_context['members'] = mks
return object_list(request, queryset,
#return tagged_object_list(request, queryset_or_model=qs, tag=tag,
template_name='laws/bill_list_by_tag.html', extra_context=extra_context)
def bill_auto_complete(request):
if request.method != 'GET':
raise Http404
if not 'query' in request.GET:
raise Http404
options = Bill.objects.filter(title__icontains=request.GET['query'])[:30]
data = []
suggestions = []
for i in options:
data.append(i.id)
suggestions.append(i.title)
result = { 'query': request.GET['query'], 'suggestions':suggestions, 'data':data }
return HttpResponse(simplejson.dumps(result), mimetype='application/json')
def vote_tags_cloud(request, min_posts_count=1):
member = None
if 'member' in request.GET:
member = get_object_or_404(Member, pk=request.GET['member'])
tags_cloud = Tag.objects.usage_for_queryset(member.votes.all(),counts=True)
tags_cloud = tagging.utils.calculate_cloud(tags_cloud)
title = _('Votes by %(member)s by tag') % {'member':member.name}
else:
title = _('Votes by tag')
tags_cloud = Tag.objects.cloud_for_model(Vote)
return render_to_response("laws/vote_tags_cloud.html",
{"tags_cloud": tags_cloud, "title":title, "member":member}, context_instance=RequestContext(request))
def vote_tag(request, tag):
tag_instance = get_tag(tag)
if tag_instance is None:
raise Http404(_('No Tag found matching "%s".') % tag)
extra_context = {'tag':tag_instance}
extra_context['tag_url'] = reverse('vote-tag',args=[tag_instance])
if 'member' in request.GET:
extra_context['member'] = get_object_or_404(Member, pk=request.GET['member'])
extra_context['member_url'] = reverse('member-detail',args=[extra_context['member'].id])
extra_context['title'] = ugettext_lazy('Votes tagged %(tag)s by %(member)s') % {'tag': tag, 'member':extra_context['member'].name}
qs = extra_context['member'].votes.all()
else: # only tag is given
extra_context['title'] = ugettext_lazy('Votes tagged %(tag)s') % {'tag': tag}
qs = Vote
queryset = TaggedItem.objects.get_by_model(qs, tag_instance)
vote_attendence = [v.votes.all() for v in TaggedItem.objects.get_by_model(Vote, tag_instance)]
d = {}
for vote in vote_attendence:
for v in vote:
d[v] = d.get(v,0)+1
# now d is a dict: MK -> number of votes in this tag
mks = d.keys()
for mk in mks:
mk.count = d[mk]
average = float(sum([mk.count for mk in mks]))/len(mks)
mks = [mk for mk in mks if mk.count>=average]
mks = tagging.utils.calculate_cloud(mks)
extra_context['members'] = mks
return object_list(request, queryset,
#return tagged_object_list(request, queryset_or_model=qs, tag=tag,
template_name='laws/vote_list_by_tag.html', extra_context=extra_context)
class BillDetailView (DetailView):
allowed_methods = ['GET', 'POST']
def get_context(self, *args, **kwargs):
context = super(BillDetailView, self).get_context(*args, **kwargs)
bill = context['object']
try:
context['title'] = "%s %s" % (bill.law.title, bill.title)
except AttributeError:
context['title'] = bill.title
try:
kp = bill.knesset_proposal
t = kp.law.title + ' ' + kp.title
vs = Vote.objects.values('title','id')
vs_titles = [v['title'] for v in vs]
close_votes = difflib.get_close_matches(t, vs_titles, cutoff=0.5)
all_bill_votes = []
all_bill_votes.extend(bill.pre_votes.values_list('id',flat=True))
if bill.first_vote:
all_bill_votes.append(bill.first_vote.id)
if bill.approval_vote:
all_bill_votes.append(bill.approval_vote.id)
close_votes = [(v['id'],v['title']) for v in vs if v['title'] in close_votes and v['id'] not in all_bill_votes]
context['close_votes'] = close_votes
except Exception, e:
pass
return context
@method_decorator(login_required)
def POST(self, object_id, **kwargs):
vote = None
bill = get_object_or_404(Bill, pk=object_id)
user_input_type = self.request.POST.get('user_input_type')
if user_input_type == 'approval vote':
vote = Vote.objects.get(pk=self.request.POST.get('vote_id'))
bill.approval_vote = vote
bill.update_stage()
if user_input_type == 'first vote':
vote = Vote.objects.get(pk=self.request.POST.get('vote_id'))
bill.first_vote = vote
bill.update_stage()
if user_input_type == 'pre vote':
vote = Vote.objects.get(pk=self.request.POST.get('vote_id'))
bill.pre_votes.add(vote)
bill.update_stage()
action.send(self.request.user, verb='added-vote-to-bill',
description=vote,
target=bill,
timestamp=datetime.datetime.now())
return HttpResponseRedirect(".")
_('added-vote-to-bill')
class BillListView (ListView):
friend_pages = [
('stage','all',_('All stages')),
]
friend_pages.extend([('stage',x[0],_(x[1])) for x in BILL_STAGE_CHOICES])
bill_stages = { 'proposed':Q(stage__isnull=False),
'pre':Q(stage='2')|Q(stage='3')|Q(stage='4')|Q(stage='5')|Q(stage='6'),
'first':Q(stage='4')|Q(stage='5')|Q(stage='6'),
'approved':Q(stage='6'),
}
bill_stages_names = { 'proposed':_('(Bills) proposed'),
'pre':_('(Bills) passed pre-vote'),
'first':_('(Bills) passed first vote'),
'approved':_('(Bills) approved'),
}
def get_queryset(self):
stage = self.request.GET.get('stage', False)
booklet = self.request.GET.get('booklet', False)
member = self.request.GET.get('member', False)
if member:
member = get_object_or_404(Member, pk=member)
qs = member.bills.all()
else:
qs = self.queryset._clone()
if stage and stage!='all':
if stage in self.bill_stages:
qs = qs.filter(self.bill_stages[stage])
else:
qs = qs.filter(stage=stage)
elif booklet:
kps = KnessetProposal.objects.filter(booklet_number=booklet).values_list('id',flat=True)
qs = qs.filter(knesset_proposal__in=kps)
return qs
def get_context(self):
context = super(BillListView, self).get_context()
r = [['?%s=%s'% (x[0],x[1]),x[2],False,x[1]] for x in self.friend_pages]
stage = self.request.GET.get('stage', False)
booklet = self.request.GET.get('booklet', False)
member = self.request.GET.get('member', False)
if stage and stage!='all':
for x in r:
if x[3]==stage:
x[2] = True
break
if stage in self.bill_stages_names:
context['stage'] = self.bill_stages_names.get(stage)
context['title'] = _('Bills %(stage)s') % {'stage':context['stage']}
elif booklet:
context['title']=_('Bills published in knesset booklet number %s') % booklet
else:
r[0][2] = True
if member:
context['member'] = get_object_or_404(Member, pk=member)
context['member_url'] = reverse('member-detail',args=[context['member'].id])
if stage in self.bill_stages_names:
context['title'] = _('Bills %(stage)s by %(member)s') % {'stage': self.bill_stages_names[stage], 'member':context['member'].name}
else:
context['title'] = _('Bills by %(member)s') % {'member':context['member'].name}
context['friend_pages'] = r
return context
class VoteListView(ListView):
session_votes_key = 'selected_votes'
friend_pages = [
('type','all',_('All votes')),
('type','law-approve', _('Law Approvals')),
('type','second-call', _('Second Call')),
('type','demurrer', _('Demurrer')),
('type','no-confidence', _('Motion of no confidence')),
('type','pass-to-committee', _('Pass to committee')),
('type','continuation', _('Continuation')),
('tagged','all',_('All')),
('tagged','false',_('Untagged Votes')),
('tagged','true',_('Tagged Votes')),
('since','7',_('Last Week')),
('since','30',_('Last Month')),
('since','all',_('All times')),
('order','time',_('Time')),
('order','controversy', _('Controversy')),
('order','against-party',_('Against Party')),
('order','votes',_('Number of votes')),
]
def get_queryset(self, **kwargs):
saved_selection = self.request.session.get(self.session_votes_key, dict())
self.options = {}
for key in ['type', 'tagged', 'since', 'order']:
self.options[key] = self.request.GET.get(key,
saved_selection.get(key, None))
return Vote.objects.filter_and_order(**self.options)
def get_context(self):
context = super(VoteListView, self).get_context()
friend_page = {}
for key in ['type', 'tagged', 'since', 'order']:
if self.options[key]:
friend_page[key] = urllib.quote(self.options[key].encode('utf8'))
else:
friend_page[key] = 'all' if key!='order' else 'time'
self.request.session[self.session_votes_key] = friend_page
r = {}
for key, value, name in self.friend_pages:
page = friend_page.copy()
current = False
if page[key]==value:
current = True
if key=='type':
context['title'] = name
else:
page[key] = value
url = "./?%s" % urllib.urlencode(page)
if key not in r:
r[key] = []
r[key].append((url, name, current))
context['friend_pages'] = r
if self.request.user.is_authenticated():
context['watched_members'] = \
self.request.user.get_profile().members
else:
context['watched_members'] = False
return context
class VoteDetailView(DetailView):
template_resource_name = 'vote'
def get_context(self):
context = super(VoteDetailView, self).get_context()
vote = context['vote']
context['title'] = vote.title
related_bills = list(vote.bills_pre_votes.all())
if Bill.objects.filter(approval_vote=vote).count()>0:
related_bills.append(vote.bill_approved)
if Bill.objects.filter(first_vote=vote).count()>0:
related_bills.extend(vote.bills_first.all())
context['bills'] = related_bills
if self.request.user.is_authenticated():
context['agendavotes'] = vote.agendavotes.filter(agenda__in=Agenda.objects.get_relevant_for_user(user=self.request.user))
else:
context['agendavotes'] = vote.agendavotes.filter(agenda__in=Agenda.objects.get_relevant_for_user(user=None))
return context
@login_required
def suggest_tag(request, object_type, object_id):
"""add a POSTed tag_id to object_type object_id, and also vote this tagging up by the current user"""
ctype = get_object_or_404(ContentType,model=object_type)
model_class = ctype.model_class()
if request.method == 'POST' and 'tag_id' in request.POST: # If the form has been submitted...
#o = model_class.objects.get(pk=object_id)
tag = get_object_or_404(Tag,pk=request.POST['tag_id'])
(ti, created) = TaggedItem._default_manager.get_or_create(tag=tag, content_type=ctype, object_id=object_id)
(tv, created) = TagVote.objects.get_or_create(tagged_item=ti, user=request.user, defaults={'vote': 0})
tv.vote = +1
tv.save()
action.send(request.user,verb='tag-voted', target=ti, description='Vote Up')
return HttpResponse("OK")
@login_required
def vote_on_tag(request, object_type, object_id, tag_id, vote):
"""request.user is voting vote (-1/0/+1) for tag on object_type with object_id
Can be used to vote on a tagged vote, or a tagged bill"""
try:
ctype = ContentType.objects.get(model=object_type)
model_class = ctype.model_class()
o = model_class.objects.get(pk=object_id)
ti = TaggedItem.objects.filter(tag__id=tag_id).filter(object_id=o.id)[0]
(tv, created) = TagVote.objects.get_or_create(tagged_item=ti, user=request.user, defaults={'vote': 0})
vote = int(vote) # vote is u'-1',u'0',u'+1' (not a Vote model)
if vote > 0:
tv.vote = 1
action.send(request.user,verb='tag-voted', target=ti, description='Vote Up')
elif vote < 0:
tv.vote = -1
action.send(request.user,verb='voted down on a tag', target=ti, description='Vote Down')
else:
tv.vote = 0
tv.save()
except:
pass
return HttpResponseRedirect("/%s/%s" % (object_type,object_id))
def tagged(request,tag):
title = ugettext_lazy('Votes tagged %(tag)s') % {'tag': tag}
try:
return tagged_object_list(request, queryset_or_model = Vote, tag=tag, extra_context={'title':title})
except Http404:
return object_list(request, queryset=Vote.objects.none(), extra_context={'title':title})
def vote_auto_complete(request):
if request.method != 'GET':
raise Http404
if not 'query' in request.GET:
raise Http404
options = Vote.objects.filter(title__icontains=request.GET['query'])[:30]
data = []
suggestions = []
for i in options:
data.append(i.id)
suggestions.append(i.title)
result = { 'query': request.GET['query'], 'suggestions':suggestions, 'data':data }
return HttpResponse(simplejson.dumps(result), mimetype='application/json')
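# Added note (not part of the original views): both auto-complete endpoints
# above return the same JSON shape consumed by the autocomplete widget, e.g.
#   {"query": "budget", "suggestions": ["Budget Law ...", ...], "data": [123, ...]}
# where "data" holds the matching object ids in the same order as "suggestions".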
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This file contains a set of internal Spreadsheet basic classes used by
others:
StandardWidgetHeaderView
StandardWidgetItemDelegate
StandardWidgetSheet
"""
from __future__ import division
from PyQt4 import QtCore, QtGui
from .spreadsheet_helpers import CellHelpers, CellResizer
class StandardWidgetHeaderView(QtGui.QHeaderView):
"""
StandardWidgetHeaderView is the standard header view (containing
column/row labels) inheriting from QHeaderView. The main
difference between this class and the original one is that it
allows resizing and stretching at the same time
"""
THICKNESS = 30
MINIMUM_SIZE = 50
fitToWindow = True
def __init__(self, orientation, parent=None):
""" StandardWidgetHeaderView(orientation: QtCore.Qt.Align...,
parent: QWidget)
-> StandardWidgetHeaderView
Initialize the header view to be like the one in the spreadsheet table
"""
QtGui.QHeaderView.__init__(self, orientation, parent)
self.setMovable(True)
self.setFont(QtGui.QFont("Helvetica",12,QtGui.QFont.Bold))
self.resizeSections(QtGui.QHeaderView.Stretch)
self.setClickable(True)
self.setHighlightSections(True)
if orientation==QtCore.Qt.Vertical:
self.setDefaultAlignment(QtCore.Qt.AlignHCenter |
QtCore.Qt.AlignVCenter)
self.connect(self, QtCore.SIGNAL('sectionResized(int, int, int)'),
self.section_resized)
self._target_size = None
section_sizes = None
def read_section_sizes(self):
if (self.section_sizes is None or
len(self.section_sizes) != self.count()):
self.section_sizes = [float(self.sectionSize(self.logicalIndex(i)))
for i in xrange(self.count())]
_resizing = False
def section_resized(self, log_index, old_size, new_size):
if self._resizing:
return
else:
self._resizing = True
try:
self._section_resized(log_index, old_size, new_size)
finally:
self._resizing = False
def resize_right_rows(self, vis_index):
if self._resizing:
return
else:
self._resizing = True
try:
self._resize_right_rows(vis_index)
finally:
self._resizing = False
def _section_resized(self, log_index, old_size, new_size):
""" section_resized(horizontal: bool, log_index: int,
old_size: int, new_size: int) -> None
Called when a section of of the header is resized
"""
if not self.fitToWindow or self._target_size is None:
return
vis_index = self.visualIndex(log_index)
if vis_index == self.count() - 1:
self.resizeSection(log_index, old_size)
return
orig_new_size = new_size
# Can't shrink below minimum size
if new_size < old_size and new_size < self.MINIMUM_SIZE:
new_size = self.MINIMUM_SIZE
if self._target_size is None:
return
# Can't take other cells below minimum size
if new_size > old_size:
min_right = 0
for i in xrange(vis_index + 1, self.count()):
if not self.isSectionHidden(self.logicalIndex(i)):
min_right += self.MINIMUM_SIZE
pos = self.sectionPosition(log_index)
total_right = self._target_size - pos - new_size
if total_right < min_right:
new_size = self._target_size - pos - min_right
if new_size != orig_new_size:
self.resizeSection(log_index, new_size)
# Resize the rows to the right
self.read_section_sizes()
self.section_sizes[vis_index] = float(new_size)
self._resize_right_rows(vis_index + 1)
def _resize_right_rows(self, vis_index):
self.read_section_sizes()
previous_space = sum(self.section_sizes[vis_index:])
new_space = self._target_size - sum(self.section_sizes[:vis_index])
# If we are growing the sections
if new_space > previous_space:
allocated_space = new_space - previous_space
for i, size in enumerate(self.section_sizes[vis_index:], vis_index):
size += allocated_space * (size / previous_space)
self.section_sizes[i] = size
self.resizeSection(self.logicalIndex(i), size)
# If we are shrinking the sections
else:
reclaimed_space = previous_space - new_space
for i, size in enumerate(self.section_sizes[vis_index:], vis_index):
size -= reclaimed_space * (
(size - self.MINIMUM_SIZE)/previous_space)
self.section_sizes[i] = size
self.resizeSection(self.logicalIndex(i), size)
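# Added worked example (illustrative numbers): when growing, the extra space is
# distributed proportionally. With section_sizes[vis_index:] == [100, 200]
# (previous_space == 300) and new_space == 450, allocated_space == 150, so the
# sections become 100 + 150*(100/300) == 150 and 200 + 150*(200/300) == 300,
# which again sum to 450.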
def sizeHint(self):
""" sizeHint() -> QSize
Set a default thickness of the bar to 30
"""
size = QtGui.QHeaderView.sizeHint(self)
if self.orientation()==QtCore.Qt.Vertical:
size.setWidth(self.THICKNESS)
else:
size.setHeight(self.THICKNESS)
return size
class StandardWidgetItemDelegate(QtGui.QItemDelegate):
"""
StandardWidgetItemDelegate will replace the QTableWidget default
display to have a padding around every cell widget
"""
def __init__(self, table):
""" StandardWidgetItemDelegate(table: QTableWidget)
-> StandardWidgetItemDelegate
Initialize to store a table and padding
"""
self.table = table
self.padding = 4
QtGui.QItemDelegate.__init__(self, None)
def setPadding(self, padding):
""" setPadding(padding: int) -> None
Re-set padding to a different value
"""
if self.padding!=padding:
self.padding = padding
def updateEditorGeometry(self, editor, option, index):
""" updateEditorGeometry(editor: QWidget,
option: QStyleOptionViewItem,
index: QModelIndex) -> None
Make sure the widget only occupies the area inside the padding
"""
rect = self.table.visualRect(index)
rect.adjust(self.padding,self.padding,-self.padding,-self.padding)
editor.setGeometry(rect)
editor.setFixedSize(rect.width(), rect.height())
def paint(self, painter, option, index):
""" paint(painter: QPainter, option: QStyleOptionViewItem,
index: QModelIndex) -> None
Paint the current cell with a ring outside
"""
QtGui.QItemDelegate.paint(self, painter, option, index)
if ((index.row(), index.column())==self.table.activeCell):
painter.save()
painter.setPen(QtGui.QPen(QtGui.QBrush(
QtGui.QColor(0.8549*255, 0.6971*255, 0.2255*255)), self.padding))
r = self.table.visualRect(index)
painter.setClipRegion(QtGui.QRegion(r))
r.adjust(self.padding//2, self.padding//2,
-self.padding//2, -self.padding//2)
painter.drawRoundedRect(r, self.padding, self.padding)
painter.restore()
class StandardWidgetSheet(QtGui.QTableWidget):
"""
StandardWidgetSheet is a standard sheet that can contain any type
of cell widget. Each of them will be put into a separate cell. In
the case of vtkRenderWindow, where creating each sheet separately
can end up with a large number of GL contexts, a special type of
VTK sheet needs to be derived from this one
"""
def __init__(self, rows=0, cols=0, parent=None):
""" StandardWidgetSheet(rows: int, cols: int, parent: QWidget)
-> StandardWidgetSheet
Construct a sheet with rows x cols cells
"""
QtGui.QTableWidget.__init__(self, 0, 0, parent)
self.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.fitToWindow = False
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setHorizontalHeader(StandardWidgetHeaderView(QtCore.Qt.Horizontal,
self))
self.horizontalHeader().setSelectionModel(self.selectionModel())
self.connect(self.horizontalHeader(),
QtCore.SIGNAL('sectionCountChanged(int, int)'),
self.updateColumnLabels)
self.connect(self.horizontalHeader(),
QtCore.SIGNAL('sectionMoved(int,int,int)'),
self.columnMoved)
self.connect(self.horizontalHeader(),
QtCore.SIGNAL('sectionPressed(int)'),
self.forceColumnMultiSelect)
self.setVerticalHeader(StandardWidgetHeaderView(QtCore.Qt.Vertical,
self))
self.verticalHeader().setSelectionModel(self.selectionModel())
self.connect(self.verticalHeader(),
QtCore.SIGNAL('sectionCountChanged(int, int)'),
self.updateRowLabels)
self.connect(self.verticalHeader(),
QtCore.SIGNAL('sectionMoved(int,int,int)'),
self.rowMoved)
self.connect(self.verticalHeader(),
QtCore.SIGNAL('sectionPressed(int)'),
self.forceRowMultiSelect)
# A hack to force the select all button in single click mode
cornerButton = self.findChild(QtGui.QAbstractButton)
if cornerButton:
self.connect(cornerButton,
QtCore.SIGNAL('clicked()'),
self.forceSheetSelect)
self.delegate = StandardWidgetItemDelegate(self)
self.setItemDelegate(self.delegate)
self.helpers = CellHelpers(parent, CellResizer(self))
self.setRowCount(rows)
self.setColumnCount(cols)
self.setFitToWindow(True)
self.connect(self,
QtCore.SIGNAL('cellActivated(int, int, bool)'),
self.selectCell)
self.activeCell = (-1,-1)
def forceColumnMultiSelect(self, logicalIndex):
""" forceColumnMultiSelect(logicalIndex: int) -> None
Make sure we always toggle the headerview in the right way
NOTE: the MultiSelection type of SelectionMode does not work
correctly for overlapping columns and rows selection
"""
if (self.selectionModel().isColumnSelected(logicalIndex, QtCore.QModelIndex())):
self.selectionModel().select(self.model().index(0, logicalIndex),
QtGui.QItemSelectionModel.Deselect |
QtGui.QItemSelectionModel.Columns)
else:
self.selectionModel().select(self.model().index(0, logicalIndex),
QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Columns)
def forceRowMultiSelect(self, logicalIndex):
""" forceRowMultiSelect(logicalIndex: int) -> None
Make sure we always toggle the headerview in the right way
NOTE: the MultiSelection type of SelectionMode does not work
correctly for overlapping columns and rows selection
"""
if (self.selectionModel().isRowSelected(logicalIndex, QtCore.QModelIndex())):
self.selectionModel().select(self.model().index(logicalIndex, 0),
QtGui.QItemSelectionModel.Deselect |
QtGui.QItemSelectionModel.Rows)
else:
self.selectionModel().select(self.model().index(logicalIndex, 0),
QtGui.QItemSelectionModel.Select |
QtGui.QItemSelectionModel.Rows)
def forceSheetSelect(self):
""" forceSheetSelect() -> None
Make sure we can toggle the whole sheet selection
"""
totalCells = self.rowCount()*self.columnCount()
if (len(self.selectionModel().selectedIndexes())<totalCells):
self.selectionModel().select(
QtGui.QItemSelection(self.model().index(0,0),
self.model().index(self.rowCount()-1,
self.columnCount()-1)),
QtGui.QItemSelectionModel.Select)
else:
self.selectionModel().clearSelection()
def updateHeaderStatus(self):
""" updateHeaderStatus() -> None
Update the visibility of the row and column header
"""
# NOTE: header visibility toggling is intentionally disabled; the code below
# is kept for reference but never reached.
return
self.horizontalHeader().setVisible(self.columnCount() > 1 or
self.rowCount() > 1)
self.verticalHeader().setVisible(self.columnCount() > 1 or
self.rowCount() > 1)
def updateRowLabels(self, oldCount, newCount):
""" updateRowLabels(oldCount: int, newCount: int) -> None
Update vertical labels when the number of row changed
"""
vLabels = []
vIdx = self.verticalHeader().visualIndex
for i in xrange(newCount):
vLabels.append(str(vIdx(i)+1))
self.setVerticalHeaderLabels(vLabels)
self.updateHeaderStatus()
def rowMoved(self, row, old, new):
""" rowMove(row: int, old: int, new: int) -> None
Renumber the vertical header labels when rows moved
"""
self.updateRowLabels(self.rowCount(), self.rowCount())
def updateColumnLabels(self, oldCount, newCount):
""" updateColumnLabels(oldCount: int, newCount: int) -> None
Update horizontal labels when the number of column changed
"""
hLabels = []
vIdx = self.horizontalHeader().visualIndex
for i in xrange(newCount):
hLabels.append(chr(vIdx(i)+ord('A')))
self.setHorizontalHeaderLabels(hLabels)
self.updateHeaderStatus()
def columnMoved(self, row, old, new):
""" columnMoved(row: int, old: int, new: int) -> None
Renumber the horizontal header labels when columns moved
"""
self.updateColumnLabels(self.columnCount(), self.columnCount())
def setFitToWindow(self, fit=True):
""" setFitToWindow(fit: boolean) -> None
Force all cells to fit into the visible area. Set fit=False
for the scroll mode, where hidden cells can be viewed by scrolling
the scrollbars.
"""
if fit!=self.fitToWindow:
self.fitToWindow = fit
self.horizontalHeader().fitToWindow = fit
self.horizontalHeader()._target_size = None
self.verticalHeader().fitToWindow = fit
self.verticalHeader()._target_size = None
if not fit:
width = self.columnWidth(self.columnCount()-1)
height = self.rowHeight(self.rowCount()-1)
self.setColumnWidth(self.columnCount()-1, width)
self.setRowHeight(self.rowCount()-1, height)
self.stretchCells()
policy = (QtCore.Qt.ScrollBarAlwaysOff if fit
else QtCore.Qt.ScrollBarAlwaysOn)
self.setHorizontalScrollBarPolicy(policy)
self.setVerticalScrollBarPolicy(policy)
def showEvent(self, event):
""" showEvent(event: QShowEvent) -> None
Make sure to stretch the sheet on the first appearance
"""
self.stretchCells()
def stretchCells(self):
""" stretchCells() -> None
Stretch all the cells equally so that they fit in the viewport
"""
if self.fitToWindow:
self.horizontalHeader().resizeSections(QtGui.QHeaderView.Stretch)
self.verticalHeader().resizeSections(QtGui.QHeaderView.Stretch)
def showHelpers(self, show, row, col):
""" showHelpers(show: boolean, row: int, col: int) -> None
Show/hide the helpers (resizer, toolbar) on the current cell
depending on the value of show
"""
if self.helpers.isInteracting():
return
if show:
if row>=0 and col>=0:
self.helpers.snapTo(row, col)
self.helpers.adjustPosition()
self.helpers.show()
else:
self.helpers.hide()
else:
self.helpers.hide()
def resizeEvent(self, e):
if not self.fitToWindow:
return
thickness = StandardWidgetHeaderView.THICKNESS
self.horizontalHeader()._target_size = self.size().width() - thickness
self.horizontalHeader().resize_right_rows(0)
self.verticalHeader()._target_size = self.size().height() - thickness
self.verticalHeader().resize_right_rows(0)
def getRealLocation(self, vRow, vCol, visual=False):
""" getRealLocation(vRow: int, vCol: int, visual: bool) -> (int, int)
Return the actual location even if there is spanning at (vRow, vCol)
"""
# Qt doesn't provide a mechanism to map from a cell to its
# span region, so we have to scan the whole spreadsheet N^2
# for now, but our spreadsheet is usually small enough.
if visual:
(row, col) = (vRow, vCol)
else:
row = self.verticalHeader().logicalIndex(vRow)
col = self.horizontalHeader().logicalIndex(vCol)
cellSet = set()
for r in xrange(self.rowCount()):
for c in xrange(self.columnCount()):
cellSet.add((r, c))
for r in xrange(self.rowCount()):
for c in xrange(self.columnCount()):
if (r, c) not in cellSet:
continue
rect = self.visualRect(self.model().index(r, c))
rSpan = self.rowSpan(r, c)
cSpan = self.columnSpan(r, c)
for rs in xrange(rSpan):
for cs in xrange(cSpan):
if (row==r+rs) and (col==c+cs):
return (r, c)
if (r+rs, c+cs) in cellSet:
cellSet.remove((r+rs, c+cs))
return (-1, -1)
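# Illustration (not from the original source): if the cell anchored at (0, 0)
# spans 2 rows and 2 columns, then getRealLocation(1, 1) returns (0, 0) -- the
# anchor cell of the span -- while coordinates outside the sheet yield (-1, -1).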
def getCell(self, row, col):
""" getCell(row: int, col: int) -> QWidget
Get cell at a specific row and column
"""
return self.cellWidget(*self.getRealLocation(row, col))
def getCellRect(self, row, col):
""" getCellRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in parent coordinates
"""
idx = self.model().index(*self.getRealLocation(row, col))
return self.visualRect(idx)
def getCellGlobalRect(self, row, col):
""" getCellGlobalRect(row: int, col: int) -> QRect
Return the rectangle surrounding the cell at location (row, col)
in global coordinates
"""
rect = self.getCellRect(row, col)
rect.moveTo(self.viewport().mapToGlobal(rect.topLeft()))
return rect
def setCellByWidget(self, row, col, cellWidget):
""" setCellByWidget(row: int,
col: int,
cellWidget: QWidget) -> None
Replace the current location (row, col) with a cell widget
"""
if cellWidget:
# Relax the size constraint of the widget
cellWidget.setMinimumSize(QtCore.QSize(0, 0))
cellWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
cellWidget.setParent(self)
(row, col) = self.getRealLocation(row, col)
index = self.model().index(row, col)
self.setCellWidget(row, col, cellWidget)
if cellWidget:
self.delegate.updateEditorGeometry(cellWidget, None, index)
if (row, col) == self.activeCell:
self.setActiveCell(row, col)
def selectCell(self, row, col, toggling):
""" selectCell(row: int, col: int, toggling: bool) -> None
Select a cell based on its current selection
"""
(row, col) = self.getRealLocation(row, col, visual=True)
if toggling:
self.selectionModel().setCurrentIndex(self.model().index(row, col),
QtGui.QItemSelectionModel.Toggle)
if (self.selectionModel().isSelected(self.model().index(row, col))):
self.setActiveCell(row, col)
else:
self.setActiveCell(-1, -1)
else:
if len(self.selectionModel().selectedIndexes())<=1:
self.selectionModel().setCurrentIndex(
self.model().index(row, col),
QtGui.QItemSelectionModel.ClearAndSelect)
self.setActiveCell(row, col)
self.viewport().repaint()
def setActiveCell(self, row, col):
""" setActiveCell(row: int, col: int) -> None
Set the location of the active cell and bring up the
corresponding toolbar
"""
self.activeCell = (row, col)
toolBar = self.parent().getCellToolBar(row, col)
if toolBar:
toolBar.snapTo(row, col)
self.parent().toolBar.setCellToolBar(toolBar)
def adjustWidgetGeometry(self, row, col):
""" setActiveCell(row: int, col: int) -> None
Adjust the widget at cell (row, col) to fit inside the cell
"""
cellWidget = self.getCell(row, col)
if cellWidget:
index = self.model().index(*self.getRealLocation(row, col))
self.delegate.updateEditorGeometry(cellWidget, None, index)
|
|
#!/usr/bin/env python
# Corey Brune - Feb 2017
#Description:
# This is a skeleton script which has all of the common functionality.
# The developer will only need to add the necessary arguments and functions
# then make the function calls in main_workflow().
#Requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
"""Description
Usage:
dx_skel.py ()
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_skel.py -h | --help | -v | --version
Description
Examples:
Options:
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_skel.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.0.000'
import sys
from os.path import basename
from time import sleep, time
from docopt import docopt
from delphixpy.exceptions import HttpError
from delphixpy.exceptions import JobError
from delphixpy.exceptions import RequestError
from delphixpy.web import job
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engines simultaneously
engine: Dictionary of engines
"""
try:
#Setup the connection to the Delphix Engine
dx_session_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
if arguments['--vdb']:
#Get the database reference we are copying from the database name
#(skeleton placeholder: 'database' below is expected to be the
# delphixpy.web database module, imported by the developer)
database_obj = find_obj_by_name(dx_session_obj.server_session,
database, arguments['--vdb'])
except DlpxException as e:
print_exception('\nERROR: Engine {} encountered an error while '
'{}:\n{}\n'.format(engine['hostname'],
arguments['--target'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
try:
with dx_session_obj.job_mode(single_thread):
while (len(dx_session_obj.jobs) > 0 or len(thingstodo)> 0):
if len(thingstodo) > 0:
# Skeleton placeholders: replace OPERATION and method_call with the
# script-specific checks and API calls.
if OPERATION:
method_call
elif OPERATION:
method_call
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
for j in dx_session_obj.jobs.keys():
job_obj = job.get(dx_session_obj.server_session,
dx_session_obj.jobs[j])
print_debug(job_obj)
print_info('{}: Replication operations: {}'.format(
engine['hostname'], job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it
# from the
# running jobs list.
del dx_session_obj.jobs[j]
elif job_obj.job_state in 'RUNNING':
# If the job is in a running state, increment the
# running job count.
i += 1
print_info('{}: {:d} jobs running.'.format(
engine['hostname'], i))
# If we have running jobs, pause before repeating the
# checks.
if len(dx_session_obj.jobs) > 0:
sleep(float(arguments['--poll']))
except (HttpError, RequestError, JobError, DlpxException) as e:
print_exception('ERROR: Could not complete replication '
'operation:{}'.format(e))
def run_job():
"""
This function runs the main_workflow asynchronously against all the servers
specified
"""
#Create an empty list to store threads we create.
threads = []
engine = None
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
engine = dx_session_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print 'Error encountered in run_job():\n{}'.format(e)
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dx_session_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: {}\n'.format(
(arguments['--engine'])))
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException('\nERROR: Delphix Engine {} cannot be '
'found in {}. Please check your value '
'and try again. Exiting.\n'.format(
arguments['--engine'], config_file_path))
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: {}'.format(
dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
break
if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
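# Note (hypothetical example, not from the original script): each `engine`
# dictionary above comes from get_config()/dxtools.conf; only the keys
# referenced in this skeleton matter here, e.g. something like
#   {'hostname': 'myengine', 'ip_address': '10.0.0.5',
#    'username': 'delphix_admin', 'password': '********', 'default': 'true'}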
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
#elapsed_minutes = round((time() - time_start)/60, +1)
#return elapsed_minutes
return round((time() - time_start)/60, +1)
def main(arguments):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global dx_session_obj
global debug
if arguments['--debug']:
debug = True
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
#This is the function that will handle processing main_workflow for
# all the servers.
run_job()
elapsed_minutes = time_elapsed()
print_info('script took {:.2f} minutes to get this far.'.format(
elapsed_minutes))
#Here we handle what we do when the unexpected happens
except DlpxException as e:
print_exception('script encountered an error while processing the '
'config file:\n{}'.format(e))
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_exception('Connection failed to the Delphix Engine. '
'Please check the ERROR message:\n{}'.format(e))
sys.exit(1)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that
we have actionable data
"""
elapsed_minutes = time_elapsed()
print_exception('A job failed in the Delphix Engine')
print_info('{} took {:.2f} minutes to get this far\n{}'.format(
basename(__file__), elapsed_minutes, e))
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
except:
"""
Everything else gets caught here
"""
print_exception(sys.exc_info()[0])
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
|
# coding: utf-8
"""Tools and helper functions for abinit calculations"""
from __future__ import unicode_literals, division
import os
import collections
import shutil
import operator
from six.moves import filter
from monty.string import list_strings
from monty.dev import deprecated
from pymatgen.util.string_utils import WildCard
import logging
logger = logging.getLogger(__name__)
class File(object):
"""
Very simple class used to store file basenames, absolute paths and directory names.
Provides wrappers for the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return "<%s, %s>" % (self.__class__.__name__, self.path)
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the file."""
return self._path
@property
def basename(self):
"""File basename."""
return os.path.basename(self.path)
@property
def relpath(self):
"""Relative path."""
return os.path.relpath(self.path)
@property
def dirname(self):
"""Absolute path of the directory where the file is located."""
return os.path.dirname(self.path)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
@property
def isncfile(self):
"""True if self is a NetCDF file"""
return self.basename.endswith(".nc")
def read(self):
"""Read data from file."""
with open(self.path, "r") as f:
return f.read()
def readlines(self):
"""Read lines from files."""
with open(self.path, "r") as f:
return f.readlines()
def write(self, string):
"""Write string to file."""
self.make_dir()
with open(self.path, "w") as f:
return f.write(string)
def writelines(self, lines):
"""Write a list of strings to file."""
self.make_dir()
with open(self.path, "w") as f:
return f.writelines(lines)
def make_dir(self):
"""Make the directory where the file is located."""
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
def remove(self):
"""Remove the file."""
try:
os.remove(self.path)
except:
pass
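# Minimal usage sketch (hypothetical path, shown as a comment to avoid side
# effects at import time):
#
#   f = File("/tmp/run.abi")
#   f.write("ecut 10\n")       # creates the parent directory if needed
#   assert f.exists and not f.isncfile
#   print(f.basename)          # -> "run.abi"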
class Directory(object):
"""
Very simple class that provides helper functions
wrapping the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return "<%s, %s>" % (self.__class__.__name__, self.path)
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the directory."""
return self._path
@property
def relpath(self):
"""Relative path."""
return os.path.relpath(self.path)
@property
def basename(self):
"""Directory basename."""
return os.path.basename(self.path)
def path_join(self, *p):
"""
Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components will be discarded.
"""
return os.path.join(self.path, *p)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
def makedirs(self):
"""
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist.
"""
if not self.exists:
os.makedirs(self.path)
def rmtree(self):
"""Recursively delete the directory tree"""
shutil.rmtree(self.path, ignore_errors=True)
def path_in(self, file_basename):
"""Return the absolute path of filename in the directory."""
return os.path.join(self.path, file_basename)
def list_filepaths(self, wildcard=None):
"""
Return the list of absolute filepaths in the directory.
Args:
wildcard:
String of tokens separated by "|".
Each token represents a pattern.
If wildcard is not None, we return only those files that
match the given shell pattern (uses fnmatch).
Example:
wildcard="*.nc|*.pdf" selects only those files that end with .nc or .pdf
"""
# Select the files in the directory.
fnames = [f for f in os.listdir(self.path)]
filepaths = filter(os.path.isfile, [os.path.join(self.path, f) for f in fnames])
# Filter using the shell patterns.
if wildcard is not None:
filepaths = WildCard(wildcard).filter(filepaths)
return filepaths
def has_abiext(self, ext):
"""
Returns the absolute path of the ABINIT file with extension ext.
Supports both Fortran files and netcdf files. In the latter case,
we check whether a file with extension ext + ".nc" is present
in the directory. Returns an empty string if the file is not present.
Raises:
ValueError if multiple files with the given ext are found.
This implies that this method is not compatible with multiple datasets.
"""
files = []
for f in self.list_filepaths():
if f.endswith(ext) or f.endswith(ext + ".nc"):
files.append(f)
if not files:
return ""
if len(files) > 1:
# ABINIT users must learn that multiple datasets are bad!
err_msg = "Found multiple files with the same extensions\n Please avoid the use of mutiple datasets!"
raise ValueError(err_msg)
return files[0]
# This dictionary maps ABINIT file extensions to the
# variables that must be used to read the file in input.
#
# TODO: It would be nice to pass absolute paths to abinit with getden_path
# so that I can avoid creating symbolic links before running but
# the presence of the C-bindings complicates the implementation
# (gfortran SIGFAULTs if I add strings to dataset_type!)
_EXT2VARS = {
"DEN": {"irdden": 1},
"WFK": {"irdwfk": 1},
"SCR": {"irdscr": 1},
"QPS": {"irdqps": 1},
"1WF": {"ird1wf": 1},
"1DEN": {"ird1den": 1},
"BSR": {"irdbsreso": 1},
"BSC": {"irdbscoup": 1},
"HAYDR_SAVE": {"irdhaydock": 1},
"DDK": {"irdddk": 1},
"DDB": {},
"GKK": {},
"DKK": {},
}
def irdvars_for_ext(ext):
"""
Returns a dictionary with the ABINIT variables
that must be used to read the file with extension ext.
"""
return _EXT2VARS[ext].copy()
def abi_extensions():
"""List with all the ABINIT extensions that are registered."""
return list(_EXT2VARS.keys())[:]
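# Quick sanity-check examples for the mapping defined above (safe to execute,
# no I/O involved): reading a WFK file requires irdwfk=1, while a DDB needs no
# ird variable at all.
assert irdvars_for_ext("WFK") == {"irdwfk": 1}
assert irdvars_for_ext("DDB") == {}
assert "1WF" in abi_extensions()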
def abi_splitext(filename):
"""
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
"""
filename = os.path.basename(filename)
is_ncfile = False
if filename.endswith(".nc"):
is_ncfile = True
filename = filename[:-3]
known_extensions = abi_extensions()
# This algorithm fails if we have two files
# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
for i in range(len(filename)-1, -1, -1):
ext = filename[i:]
if ext in known_extensions:
break
else:
raise ValueError("Cannot find a registered extension in %s" % filename)
root = filename[:i]
if is_ncfile:
ext += ".nc"
return root, ext
class FilepathFixer(object):
"""
This object modifies the names of particular output files
produced by ABINIT so that the file extension is preserved.
Having a one-to-one mapping between file extension and data format
is indeed fundamental for the correct behaviour of abinitio since:
- We locate the output file by just inspecting the extension
- We select the variables that must be added to the input file
on the basis of the extension specified by the user during
the initialization of the `AbinitFlow`.
Unfortunately, ABINIT developers like to append extra stuff
to the initial extension and therefore we have to call
`FilepathFixer` to fix the output files produced by the run.
Example:
>>> fixer = FilepathFixer()
>>> assert fixer.fix_paths('/foo/out_1WF17') == {'/foo/out_1WF17': '/foo/out_1WF'}
>>> assert fixer.fix_paths('/foo/out_1WF5.nc') == {'/foo/out_1WF5.nc': '/foo/out_1WF.nc'}
"""
def __init__(self):
# dictionary mapping the *official* file extension to
# the regular expression used to tokenize the basename of the file
# To add a new fix it's sufficient to add a new regexp and
# a static method _fix_EXTNAME
self.regs = regs = {}
import re
regs["1WF"] = re.compile("(\w+_)1WF(\d+)(.nc)?$")
regs["1DEN"] = re.compile("(\w+_)1DEN(\d+)(.nc)?$")
@staticmethod
def _fix_1WF(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1WF" + ncext
@staticmethod
def _fix_1DEN(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1DEN" + ncext
def _fix_path(self, path):
for ext, regex in self.regs.items():
head, tail = os.path.split(path)
match = regex.match(tail)
if match:
newtail = getattr(self, "_fix_" + ext)(match)
newpath = os.path.join(head, newtail)
return newpath, ext
return None, None
def fix_paths(self, paths):
"""
Fix the filenames in the iterable paths
Returns:
old2new:
Mapping old_path --> new_path
"""
old2new, fixed_exts = {}, []
for path in list_strings(paths):
newpath, ext = self._fix_path(path)
if newpath is not None:
assert ext not in fixed_exts
fixed_exts.append(ext)
old2new[path] = newpath
return old2new
def _bop_not(obj):
"""Boolean not."""
return not bool(obj)
def _bop_and(obj1, obj2):
"""Boolean and."""
return bool(obj1) and bool(obj2)
def _bop_or(obj1, obj2):
"""Boolean or."""
return bool(obj1) or bool(obj2)
def _bop_divisible(num1, num2):
"""Return True if num1 is divisible by num2."""
return (num1 % num2) == 0.0
# Mapping string --> operator.
_UNARY_OPS = {
"$not": _bop_not,
}
_BIN_OPS = {
"$eq": operator.eq,
"$ne": operator.ne,
"$gt": operator.gt,
"$ge": operator.ge,
"$lt": operator.lt,
"$le": operator.le,
"$divisible": _bop_divisible,
"$and": _bop_and,
"$or": _bop_or,
}
_ALL_OPS = list(_UNARY_OPS.keys()) + list(_BIN_OPS.keys())
def map2rpn(map, obj):
"""
Convert a Mongodb-like dictionary to a RPN list of operands and operators.
Reverse Polish notation (RPN) is a mathematical notation in which every
operator follows all of its operands, e.g.
3 - 4 + 5 --> 3 4 - 5 +
>>> d = {2.0: {'$eq': 1.0}}
>>> assert map2rpn(d, None) == [2.0, 1.0, '$eq']
"""
rpn = []
for k, v in map.items():
if k in _ALL_OPS:
if isinstance(v, collections.Mapping):
# e.g "$not": {"$gt": "one"}
# print("in op_vmap",k, v)
values = map2rpn(v, obj)
rpn.extend(values)
rpn.append(k)
elif isinstance(v, (list, tuple)):
# e.g "$and": [{"$not": {"one": 1.0}}, {"two": {"$lt": 3}}]}
# print("in_op_list",k, v)
for d in v:
rpn.extend(map2rpn(d, obj))
rpn.append(k)
else:
# Examples
# 1) "$eq"": "attribute_name"
# 2) "$eq"": 1.0
try:
#print("in_otherv",k, v)
rpn.append(getattr(obj, v))
rpn.append(k)
except TypeError:
#print("in_otherv, raised",k, v)
rpn.extend([v, k])
else:
try:
k = getattr(obj, k)
except TypeError:
k = k
if isinstance(v, collections.Mapping):
# "one": {"$eq": 1.0}}
values = map2rpn(v, obj)
rpn.append(k)
rpn.extend(values)
else:
#"one": 1.0
rpn.extend([k, v, "$eq"])
return rpn
def evaluate_rpn(rpn):
"""
Evaluates the RPN form produced by map2rpn.
Returns:
bool
"""
vals_stack = []
for item in rpn:
if item in _ALL_OPS:
# Apply the operator and push the result onto the stack.
v2 = vals_stack.pop()
if item in _UNARY_OPS:
res = _UNARY_OPS[item](v2)
elif item in _BIN_OPS:
v1 = vals_stack.pop()
res = _BIN_OPS[item](v1, v2)
else:
raise ValueError("%s not in unary_ops or bin_ops" % str(item))
vals_stack.append(res)
else:
# Push the operand
vals_stack.append(item)
#print(vals_stack)
assert len(vals_stack) == 1
assert isinstance(vals_stack[0], bool)
return vals_stack[0]
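# Small end-to-end sketch of the two functions above (pure computation, safe to
# execute): literal operands need no attribute lookup, so obj can be None, as
# in the map2rpn doctest.
assert evaluate_rpn(map2rpn({2.0: {"$eq": 2.0}}, None)) is True
assert evaluate_rpn(map2rpn({3.0: {"$gt": 5.0}}, None)) is False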
class Condition(object):
"""
This object receives a dictionary that defines a boolean condition whose syntax is similar
to the one used in mongodb (albeit not all the operators available in mongodb are supported here).
Example:
$gt: {field: {$gt: value} }
$gt selects those documents where the value of the field is greater than (i.e. >) the specified value.
$and performs a logical AND operation on an array of two or more expressions (e.g. <expression1>, <expression2>, etc.)
and selects the documents that satisfy all the expressions in the array.
{ $and: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
Consider the following example:
db.inventory.find( { qty: { $gt: 20 } } )
This query will select all documents in the inventory collection where the qty field value is greater than 20.
Consider also the following example with $and:
db.inventory.find({ $and: [ { price: 1.99 }, { qty: { $lt: 20 } }, { sale: true } ] } )
"""
def __init__(self, cmap):
self.cmap = cmap
def __str__(self):
return str(self.cmap)
def __bool__(self):
return bool(self.cmap)
__nonzero__ = __bool__
def __call__(self, obj):
if not self: return True
try:
return evaluate_rpn(map2rpn(self.cmap, obj))
except Exception as exc:
logger.warning("Condition(%s) raised Exception:\n %s" % (type(obj), str(exc)))
return False
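# Usage sketch for Condition (pure computation, safe to execute): it adds the
# "empty condition is always True" shortcut and exception handling on top of
# map2rpn/evaluate_rpn.
assert Condition({})(None)
assert Condition({2.0: {"$ge": 1.0}})(None)
assert not Condition({2.0: {"$gt": 5.0}})(None)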
class Editor(object):
"""
Wrapper class that calls the editor specified by the user
or the one specified in the $EDITOR env variable.
"""
def __init__(self, editor=None):
"""If editor is None, $EDITOR is used."""
self.editor = os.getenv("EDITOR", "vi") if editor is None else str(editor)
def edit_files(self, fnames, ask_for_exit=True):
exit_status = 0
for idx, fname in enumerate(fnames):
exit_status = self.edit_file(fname)
if ask_for_exit and idx != len(fnames)-1 and self.user_wants_to_exit():
break
return exit_status
def edit_file(self, fname):
from subprocess import call
retcode = call([self.editor, fname])
if retcode != 0:
import warnings
warnings.warn("Error while trying to edit file: %s" % fname)
return retcode
@staticmethod
def user_wants_to_exit():
"""Show an interactive prompt asking if exit is wanted."""
try:
answer = raw_input("Do you want to continue [Y/n]")
except EOFError:
return True
return answer.lower().strip() in ["n", "no"]
|
|
import pytest
from django.core import mail
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides.core.config import config
from openslides.users.models import Group, PersonalNote, User
from openslides.users.serializers import UserFullSerializer
from openslides.utils.autoupdate import inform_changed_data
from openslides.utils.test import TestCase
from ..helpers import count_queries
@pytest.mark.django_db(transaction=False)
def test_user_db_queries():
"""
Tests that only the following db queries are done:
* 2 requests to get the list of all users and
* 1 request to get the list of all groups.
"""
for index in range(10):
User.objects.create(username=f"user{index}")
assert count_queries(User.get_elements) == 3
@pytest.mark.django_db(transaction=False)
def test_group_db_queries():
"""
Tests that only the following db queries are done:
* 1 request to get the list of all groups.
* 1 request to get the permissions
"""
for index in range(10):
Group.objects.create(name=f"group{index}")
assert count_queries(Group.get_elements) == 2
class UserGetTest(TestCase):
"""
Tests to receive users via the REST API.
"""
def test_get_with_user_who_is_in_group_with_pk_1(self):
"""
It is invalid, that a user is in the group with the pk 1. But if the
database is invalid, the user should nevertheless be received.
"""
admin = User.objects.get(username="admin")
group1 = Group.objects.get(pk=1)
admin.groups.add(group1)
self.client.login(username="admin", password="admin")
response = self.client.get("/rest/users/user/1/")
self.assertEqual(response.status_code, 200)
def test_get_with_user_without_permissions(self):
group = Group.objects.get(pk=1)
permission_string = "users.can_see_name"
app_label, codename = permission_string.split(".")
permission = group.permissions.get(
content_type__app_label=app_label, codename=codename
)
group.permissions.remove(permission)
inform_changed_data(group)
config["general_system_enable_anonymous"] = True
guest_client = APIClient()
response = guest_client.get("/rest/users/user/1/")
self.assertEqual(response.status_code, 404)
class UserCreate(TestCase):
"""
Tests creation of users via REST API.
"""
def test_simple_creation(self):
self.client.login(username="admin", password="admin")
response = self.client.post(
reverse("user-list"), {"last_name": "Test name keimeiShieX4Aekoe3do"}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
new_user = User.objects.get(username="Test name keimeiShieX4Aekoe3do")
self.assertEqual(response.data["id"], new_user.id)
def test_creation_with_group(self):
self.client.login(username="admin", password="admin")
# These are the builtin groups 'Delegates' and 'Staff'. The pks are valid.
group_pks = (2, 3)
self.client.post(
reverse("user-list"),
{"last_name": "Test name aedah1iequoof0Ashed4", "groups_id": group_pks},
)
user = User.objects.get(username="Test name aedah1iequoof0Ashed4")
self.assertTrue(user.groups.filter(pk=group_pks[0]).exists())
self.assertTrue(user.groups.filter(pk=group_pks[1]).exists())
def test_creation_with_default_group(self):
self.client.login(username="admin", password="admin")
# This is the builtin group 'Default'.
# The pk is valid, but this group cannot be added to users.
group_pk = (1,)
response = self.client.post(
reverse("user-list"),
{"last_name": "Test name aedah1iequoof0Ashed4", "groups_id": group_pk},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data,
{"groups_id": ['Invalid pk "%d" - object does not exist.' % group_pk]},
)
class UserUpdate(TestCase):
"""
Tests update of users via REST API.
"""
def test_simple_update_via_patch(self):
"""
Test to only update the last_name with a patch request.
The field username *should not* be changed by the request.
"""
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin user 'Administrator' with username 'admin'. The pk is valid.
user_pk = User.objects.get(username="admin").pk
response = admin_client.patch(
reverse("user-detail", args=[user_pk]),
{"last_name": "New name tu3ooh5Iez5Aec2laefo"},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user = User.objects.get(pk=user_pk)
self.assertEqual(user.last_name, "New name tu3ooh5Iez5Aec2laefo")
self.assertEqual(user.username, "admin")
def test_simple_update_via_put(self):
"""
Test to only update the last_name with a put request.
The field username *should* be changed by the request.
"""
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin user 'Administrator'. The pk is valid.
user_pk = User.objects.get(username="admin").pk
response = admin_client.put(
reverse("user-detail", args=[user_pk]), {"last_name": "New name Ohy4eeyei5"}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.get(pk=user_pk).username, "New name Ohy4eeyei5")
def test_update_deactivate_yourself(self):
"""
Tests that a user cannot deactivate themselves.
"""
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin user 'Administrator'. The pk is valid.
user_pk = User.objects.get(username="admin").pk
response = admin_client.patch(
reverse("user-detail", args=[user_pk]),
{"username": "admin", "is_active": False},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_yourself_non_manager(self):
"""
Tests that a user can update themselves even if they are not a manager.
"""
user = User.objects.create_user(
username="non-admin zeiyeGhaoXoh4awe3xai",
password="non-admin chah1hoshohN5Oh7zouj",
)
client = APIClient()
client.login(
username="non-admin zeiyeGhaoXoh4awe3xai",
password="non-admin chah1hoshohN5Oh7zouj",
)
response = client.put(
reverse("user-detail", args=[user.pk]),
{
"username": "New username IeWeipee5mahpi4quupo",
"last_name": "New name fae1Bu1Eyeis9eRox4xu",
"about_me": "New profile text Faemahphi3Hilokangei",
},
)
self.assertEqual(response.status_code, 200)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.username, "New username IeWeipee5mahpi4quupo")
self.assertEqual(user.about_me, "New profile text Faemahphi3Hilokangei")
# The user is not allowed to change some other fields (like last_name).
self.assertNotEqual(user.last_name, "New name fae1Bu1Eyeis9eRox4xu")
class UserDelete(TestCase):
"""
Tests delete of users via REST API.
"""
def test_delete(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
User.objects.create(username="Test name bo3zieT3iefahng0ahqu")
response = admin_client.delete(reverse("user-detail", args=["2"]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(
User.objects.filter(username="Test name bo3zieT3iefahng0ahqu").exists()
)
def test_delete_yourself(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin user 'Administrator'. The pk is valid.
admin_user_pk = 1
response = admin_client.delete(reverse("user-detail", args=[admin_user_pk]))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UserResetPassword(TestCase):
"""
Tests resetting users password via REST API by a manager.
"""
def test_reset(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
user = User.objects.create(username="Test name ooMoa4ou4mohn2eo1ree")
user.default_password = "new_password_Yuuh8OoQueePahngohy3"
user.save()
response = admin_client.post(
reverse("user-reset-password", args=[user.pk]),
{"password": "new_password_Yuuh8OoQueePahngohy3_new"},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(
User.objects.get(pk=user.pk).check_password(
"new_password_Yuuh8OoQueePahngohy3_new"
)
)
"""
Tests whether a random password is set as default and actual password
if no default password is provided.
"""
def test_set_random_initial_password(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
serializer = UserFullSerializer()
user = serializer.create({"username": "Test name 9gt043qwvnj2d0cr"})
user.save()
default_password = User.objects.get(pk=user.pk).default_password
self.assertIsNotNone(default_password)
self.assertEqual(len(default_password), 8)
self.assertTrue(User.objects.get(pk=user.pk).check_password(default_password))
class UserMassImport(TestCase):
"""
Tests mass import of users.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
def test_mass_import(self):
user_1 = {
"first_name": "first_name_kafaith3woh3thie7Ciy",
"last_name": "last_name_phah0jaeph9ThoongaeL",
"groups_id": [],
}
user_2 = {
"first_name": "first_name_kohdao7Eibouwee8ma2O",
"last_name": "last_name_kafaith3woh3thie7Ciy",
"groups_id": [],
}
response = self.client.post(
reverse("user-mass-import"), {"users": [user_1, user_2]}, format="json"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.count(), 3)
class UserSendInvitationEmail(TestCase):
"""
Tests sending an email to the user.
"""
email = "[email protected]"
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.admin = User.objects.get()
self.admin.email = self.email
self.admin.save()
def test_email_sending(self):
data = {
"user_ids": [self.admin.pk],
"subject": config["users_email_subject"],
"message": config["users_email_body"],
}
response = self.client.post(
reverse("user-mass-invite-email"), data, format="json"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 1)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to[0], self.email)
class GroupMetadata(TestCase):
def test_options_request_as_anonymous_user_activated(self):
config["general_system_enable_anonymous"] = True
response = self.client.options("/rest/users/group/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["name"], "Group List")
perm_list = response.data["actions"]["POST"]["permissions"]["choices"]
self.assertEqual(type(perm_list), list)
for item in perm_list:
self.assertEqual(type(item), dict)
self.assertTrue(item.get("display_name") is not None)
self.assertTrue(item.get("value") is not None)
class GroupReceive(TestCase):
def test_get_groups_as_anonymous_deactivated(self):
"""
Test to get the groups with an anonymous user, when they are deactivated.
"""
response = self.client.get("/rest/users/group/")
self.assertEqual(response.status_code, 403)
def test_get_groups_as_anonymous_user_activated(self):
"""
Test to get the groups with an anonymous user, when they are activated.
"""
config["general_system_enable_anonymous"] = True
response = self.client.get("/rest/users/group/")
self.assertEqual(response.status_code, 200)
def test_logged_in_user_with_no_permission(self):
"""
Test to get the groups with a logged in user with no permissions.
"""
user = User(username="test")
user.set_password("test")
user.save()
default_group = Group.objects.get(pk=1)
default_group.permissions.all().delete()
self.client.login(username="test", password="test")
response = self.client.get("/rest/users/group/")
self.assertEqual(response.status_code, 200)
class GroupCreate(TestCase):
"""
Tests creation of groups via REST API.
"""
def test_creation(self):
self.client.login(username="admin", password="admin")
# This contains two valid permissions of the users app.
permissions = ("users.can_see_name", "users.can_see_extra_data")
response = self.client.post(
reverse("group-list"),
{"name": "Test name la8eephu9vaecheiKeif", "permissions": permissions},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
group = Group.objects.get(name="Test name la8eephu9vaecheiKeif")
for permission in permissions:
app_label, codename = permission.split(".")
self.assertTrue(
group.permissions.get(
content_type__app_label=app_label, codename=codename
)
)
def test_failed_creation_invalid_value(self):
self.client.login(username="admin", password="admin")
permissions = ("invalid_permission",)
response = self.client.post(
reverse("group-list"),
{"name": "Test name ool5aeb6Rai2aiLaith1", "permissions": permissions},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data,
{
"permissions": [
'Incorrect value "invalid_permission". Expected app_label.codename string.'
]
},
)
def test_failed_creation_invalid_permission(self):
self.client.login(username="admin", password="admin")
permissions = ("invalid_app.invalid_permission",)
response = self.client.post(
reverse("group-list"),
{"name": "Test name wei2go2aiV3eophi9Ohg", "permissions": permissions},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.data,
{
"permissions": [
'Invalid permission "invalid_app.invalid_permission". Object does not exist.'
]
},
)
class GroupUpdate(TestCase):
"""
Tests update of groups via REST API.
"""
def test_simple_update_via_patch(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin group 'Delegates'. The pk is valid.
group_pk = 2
# This contains one valid permission of the users app.
permissions = ("users.can_see_name",)
response = admin_client.patch(
reverse("group-detail", args=[group_pk]), {"permissions": permissions}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
group = Group.objects.get(pk=group_pk)
for permission in permissions:
app_label, codename = permission.split(".")
self.assertTrue(
group.permissions.get(
content_type__app_label=app_label, codename=codename
)
)
def test_simple_update_via_put(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# This is the builtin group 'Delegates'. The pk is valid.
group_pk = 2
# This contains one valid permission of the users app.
permissions = ("users.can_see_name",)
response = admin_client.put(
reverse("group-detail", args=[group_pk]), {"permissions": permissions}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {"name": ["This field is required."]})
def test_update_via_put_with_new_permissions(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
group = Group.objects.create(name="group_name_inooThe3dii4mahWeeSe")
# This contains all permissions.
permissions = [
"agenda.can_be_speaker",
"agenda.can_manage",
"agenda.can_see",
"agenda.can_see_internal_items",
"assignments.can_manage",
"assignments.can_nominate_other",
"assignments.can_nominate_self",
"assignments.can_see",
"core.can_manage_config",
"core.can_manage_projector",
"core.can_manage_tags",
"core.can_manage_chat",
"core.can_see_frontpage",
"core.can_see_projector",
"core.can_use_chat",
"mediafiles.can_manage",
"mediafiles.can_see",
"mediafiles.can_see_hidden",
"mediafiles.can_upload",
"motions.can_create",
"motions.can_manage",
"motions.can_see",
"motions.can_support",
"users.can_manage",
"users.can_see_extra_data",
"users.can_see_name",
]
response = admin_client.put(
reverse("group-detail", args=[group.pk]),
{"name": "new_group_name_Chie6duwaepoo8aech7r", "permissions": permissions},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
group = Group.objects.get(pk=group.pk)
for permission in permissions:
app_label, codename = permission.split(".")
self.assertTrue(
group.permissions.get(
content_type__app_label=app_label, codename=codename
)
)
class GroupDelete(TestCase):
"""
Tests delete of groups via REST API.
"""
def test_delete(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
group = Group.objects.create(name="Test name Koh4lohlaewoog9Ahsh5")
response = admin_client.delete(reverse("group-detail", args=[group.pk]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(
Group.objects.filter(name="Test name Koh4lohlaewoog9Ahsh5").exists()
)
def test_delete_builtin_groups(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
# The pk of builtin group 'Default'
group_pk = 1
response = admin_client.delete(reverse("group-detail", args=[group_pk]))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class PersonalNoteTest(TestCase):
"""
Tests for PersonalNote model.
"""
def test_anonymous_without_personal_notes(self):
admin = User.objects.get(username="admin")
personal_note = PersonalNote.objects.create(
user=admin, notes='["admin_personal_note_OoGh8choro0oosh0roob"]'
)
config["general_system_enable_anonymous"] = True
guest_client = APIClient()
response = guest_client.get(
reverse("personalnote-detail", args=[personal_note.pk])
)
self.assertEqual(response.status_code, 404)
def test_admin_send_JSON(self):
admin_client = APIClient()
admin_client.login(username="admin", password="admin")
response = admin_client.post(
reverse("personalnote-list"),
{
"notes": {
"example-model": {
"1": {
"note": "note for the example.model with id 1 Oohae1JeuSedooyeeviH",
"star": True,
}
}
}
},
format="json",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to the VMware vCenter platform.
"""
import os
import re
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import versionutils as v_utils
from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.i18n import _
import nova.privsep.path
from nova import rc_fields as fields
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
TIME_BETWEEN_API_CALL_RETRIES = 1.0
MAX_CONSOLE_BYTES = 100 * units.Ki
class VMwareVCDriver(driver.ComputeDriver):
"""The VC host connection object."""
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
"supports_migrate_to_same_host": True,
"supports_attach_interface": True,
"supports_multiattach": False
}
# Legacy nodename is of the form: <mo id>(<cluster name>)
# e.g. domain-26(TestCluster)
# We assume <mo id> consists of alphanumeric, _ and -.
# We assume cluster name is everything between the first ( and the last ).
# We pull out <mo id> for re-use.
LEGACY_NODENAME = re.compile('([\w-]+)\(.+\)')
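# For example (illustrative only):
# LEGACY_NODENAME.match('domain-26(TestCluster)').group(1) == 'domain-26'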
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
# vCenter is not a hypervisor itself, it works with multiple
# hypervisor host machines and their guests. This fact can
# subtly alter how vSphere and OpenStack interoperate.
def __init__(self, virtapi, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
if (CONF.vmware.host_ip is None or
CONF.vmware.host_username is None or
CONF.vmware.host_password is None):
raise Exception(_("Must specify host_ip, host_username and "
"host_password to use vmwareapi.VMwareVCDriver"))
if CONF.vnc.keymap:
LOG.warning('The option "[vnc] keymap" has been deprecated in '
'favor of the VMWare-specific "[vmware] vnc_keymap" '
'option. Please update nova.conf to address this '
'change')
self._datastore_regex = None
if CONF.vmware.datastore_regex:
try:
self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
except re.error:
raise exception.InvalidInput(reason=
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
self._check_min_version()
# Update the PBM location if necessary
if CONF.vmware.pbm_enabled:
self._update_pbm_location()
self._validate_configuration()
self._cluster_name = CONF.vmware.cluster_name
self._cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
self._cluster_name)
if self._cluster_ref is None:
raise exception.NotFound(_("The specified cluster '%s' was not "
"found in vCenter")
% self._cluster_name)
self._vcenter_uuid = self._get_vcenter_uuid()
self._nodename = self._create_nodename(self._cluster_ref.value)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self._cluster_ref)
self._vmops = vmops.VMwareVMOps(self._session,
virtapi,
self._volumeops,
self._cluster_ref,
datastore_regex=self._datastore_regex)
self._vc_state = host.VCState(self._session,
self._nodename,
self._cluster_ref,
self._datastore_regex)
# Register the OpenStack extension
self._register_openstack_extension()
def _check_min_version(self):
min_version = v_utils.convert_version_to_int(constants.MIN_VC_VERSION)
next_min_ver = v_utils.convert_version_to_int(
constants.NEXT_MIN_VC_VERSION)
vc_version = vim_util.get_vc_version(self._session)
LOG.info("VMware vCenter version: %s", vc_version)
if v_utils.convert_version_to_int(vc_version) < min_version:
raise exception.NovaException(
_('Detected vCenter version %(version)s. Nova requires VMware '
'vCenter version %(min_version)s or greater.') % {
'version': vc_version,
'min_version': constants.MIN_VC_VERSION})
elif v_utils.convert_version_to_int(vc_version) < next_min_ver:
LOG.warning('Running Nova with a VMware vCenter version less '
'than %(version)s is deprecated. The required '
'minimum version of vCenter will be raised to '
'%(version)s in the 16.0.0 release.',
{'version': constants.NEXT_MIN_VC_VERSION})
@property
def need_legacy_block_device_info(self):
return False
def _update_pbm_location(self):
if CONF.vmware.pbm_wsdl_location:
pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
else:
version = vim_util.get_vc_version(self._session)
pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)
def _validate_configuration(self):
if CONF.vmware.pbm_enabled:
if not CONF.vmware.pbm_default_policy:
raise error_util.PbmDefaultPolicyUnspecified()
if not pbm.get_profile_id_by_name(
self._session,
CONF.vmware.pbm_default_policy):
raise error_util.PbmDefaultPolicyDoesNotExist()
if CONF.vmware.datastore_regex:
LOG.warning("datastore_regex is ignored when PBM is enabled")
self._datastore_regex = None
def init_host(self, host):
vim = self._session.vim
if vim is None:
self._session._create_session()
def cleanup_host(self, host):
self._session.logout()
def _register_openstack_extension(self):
# Register an 'OpenStack' extension in vCenter
os_extension = self._session._call_method(vim_util, 'find_extension',
constants.EXTENSION_KEY)
if os_extension is None:
try:
self._session._call_method(vim_util, 'register_extension',
constants.EXTENSION_KEY,
constants.EXTENSION_TYPE_INSTANCE)
LOG.info('Registered extension %s with vCenter',
constants.EXTENSION_KEY)
except vexc.VimFaultException as e:
with excutils.save_and_reraise_exception() as ctx:
if 'InvalidArgument' in e.fault_list:
LOG.debug('Extension %s already exists.',
constants.EXTENSION_KEY)
ctx.reraise = False
else:
LOG.debug('Extension %s already exists.', constants.EXTENSION_KEY)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
state = vm_util.get_vm_state(self._session, instance)
ignored_states = [power_state.RUNNING, power_state.SUSPENDED]
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
return self._vmops.list_instances()
def list_instances(self):
"""List VM instances from the single compute node."""
return self._vmops.list_instances()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor)
def confirm_migration(self, context, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
self._vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
# vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
return self._vmops.get_vnc_console(instance)
def get_mks_console(self, context, instance):
return self._vmops.get_mks_console(instance)
def get_console_output(self, context, instance):
if not CONF.vmware.serial_log_dir:
LOG.error("The 'serial_log_dir' config option is not set!")
return
fname = instance.uuid.replace('-', '')
path = os.path.join(CONF.vmware.serial_log_dir, fname)
if not os.path.exists(path):
LOG.warning('The console log is missing. Check your VSPC '
'configuration', instance=instance)
return b""
read_log_data, remaining = nova.privsep.path.last_bytes(
path, MAX_CONSOLE_BYTES)
return read_log_data
def _get_vcenter_uuid(self):
"""Retrieves the vCenter UUID."""
about = self._session._call_method(nova_vim_util, 'get_about_info')
return about.instanceUuid
def _create_nodename(self, mo_id):
"""Return a nodename which uniquely describes a cluster.
The name will be of the form:
<mo id>.<vcenter uuid>
e.g.
domain-26.9d51f082-58a4-4449-beed-6fd205a5726b
"""
return '%s.%s' % (mo_id, self._vcenter_uuid)
def _get_available_resources(self, host_stats):
return {'vcpus': host_stats['vcpus'],
'memory_mb': host_stats['host_memory_total'],
'local_gb': host_stats['disk_total'],
'vcpus_used': 0,
'memory_mb_used': host_stats['host_memory_total'] -
host_stats['host_memory_free'],
'local_gb_used': host_stats['disk_used'],
'hypervisor_type': host_stats['hypervisor_type'],
'hypervisor_version': host_stats['hypervisor_version'],
'hypervisor_hostname': host_stats['hypervisor_hostname'],
# The VMWare driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'supported_instances': host_stats['supported_instances'],
'numa_topology': None,
}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
host_stats = self._vc_state.get_host_stats(refresh=True)
stats_dict = self._get_available_resources(host_stats)
return stats_dict
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This driver supports only one compute node.
"""
return [self._nodename]
def get_inventory(self, nodename):
"""Return a dict, keyed by resource class, of inventory information for
the supplied node.
"""
stats = vm_util.get_stats_from_cluster(self._session,
self._cluster_ref)
datastores = ds_util.get_available_datastores(self._session,
self._cluster_ref,
self._datastore_regex)
total_disk_capacity = sum([ds.capacity for ds in datastores])
max_free_space = max([ds.freespace for ds in datastores])
reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
result = {
fields.ResourceClass.VCPU: {
'total': stats['cpu']['vcpus'],
'reserved': CONF.reserved_host_cpus,
'min_unit': 1,
'max_unit': stats['cpu']['max_vcpus_per_host'],
'step_size': 1,
},
fields.ResourceClass.MEMORY_MB: {
'total': stats['mem']['total'],
'reserved': CONF.reserved_host_memory_mb,
'min_unit': 1,
'max_unit': stats['mem']['max_mem_mb_per_host'],
'step_size': 1,
},
fields.ResourceClass.DISK_GB: {
'total': total_disk_capacity // units.Gi,
'reserved': reserved_disk_gb,
'min_unit': 1,
'max_unit': max_free_space // units.Gi,
'step_size': 1,
},
}
return result
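    # For illustration only (hypothetical numbers, not taken from the driver): a small
    # single-cluster node might report inventory from get_inventory() shaped like
    #   {fields.ResourceClass.VCPU: {'total': 16, 'reserved': 0, 'min_unit': 1,
    #                                'max_unit': 8, 'step_size': 1},
    #    fields.ResourceClass.MEMORY_MB: {...},
    #    fields.ResourceClass.DISK_GB: {...}}
    # where the DISK_GB figures are aggregate datastore capacity converted to whole GiB.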
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info, instance)
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Detach volume storage to VM instance."""
# NOTE(claudiub): if context parameter is to be used in the future,
# the _detach_instance_volumes method will have to be updated as well.
return self._volumeops.detach_volume(connection_info, instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
def get_host_ip_addr(self):
"""Returns the IP address of the vCenter host."""
return CONF.vmware.host_ip
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info, reboot_type)
def _detach_instance_volumes(self, instance, block_device_info):
# We need to detach attached volumes
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if block_device_mapping:
# Certain disk types, for example 'IDE' do not support hot
# plugging. Hence we need to power off the instance and update
# the instance state.
self._vmops.power_off(instance)
for disk in block_device_mapping:
connection_info = disk['connection_info']
try:
# NOTE(claudiub): Passing None as the context, as it is
# not currently used.
self.detach_volume(None, connection_info, instance,
disk.get('device_name'))
except exception.DiskNotFound:
LOG.warning('The volume %s does not exist!',
disk.get('device_name'),
instance=instance)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Failed to detach %(device_name)s. "
"Exception: %(exc)s",
{'device_name': disk.get('device_name'),
'exc': e},
instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
# Destroy gets triggered when Resource Claim in resource_tracker
# is not successful. When resource claim is not successful,
# node is not set in instance. Perform destroy only if node is set
if not instance.node:
return
# A resize uses the same instance on the VC. We do not delete that
# VM in the event of a revert
if instance.task_state == task_states.RESIZE_REVERTING:
return
# We need to detach attached volumes
if block_device_info is not None:
try:
self._detach_instance_volumes(instance, block_device_info)
except vexc.ManagedObjectNotFoundException:
                LOG.warning('Instance does not exist. Proceeding to '
'delete instance properties on datastore',
instance=instance)
self._vmops.destroy(instance, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def suspend(self, context, instance):
"""Suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_instance_diagnostics(instance)
def host_power_action(self, action):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Host operations not supported by VC driver.
This needs to override the ESX driver implementation.
"""
raise NotImplementedError()
def get_host_uptime(self):
"""Host uptime operation not supported by VC driver."""
msg = _("Multiple hosts may be managed by the VMWare "
"vCenter driver; therefore we do not return "
"uptime for just one host.")
raise NotImplementedError(msg)
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, nw_info)
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self._vmops.manage_image_cache(context, all_instances)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
return self._vmops.instance_exists(instance)
def attach_interface(self, context, instance, image_meta, vif):
"""Attach an interface to the instance."""
self._vmops.attach_interface(context, instance, image_meta, vif)
def detach_interface(self, context, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(context, instance, vif)
class VMwareAPISession(api.VMwareAPISession):
"""Sets up a session with the VC/ESX host and handles all
the calls made to the host.
"""
def __init__(self, host_ip=CONF.vmware.host_ip,
host_port=CONF.vmware.host_port,
username=CONF.vmware.host_username,
password=CONF.vmware.host_password,
retry_count=CONF.vmware.api_retry_count,
scheme="https",
cacert=CONF.vmware.ca_file,
insecure=CONF.vmware.insecure,
pool_size=CONF.vmware.connection_pool_size):
super(VMwareAPISession, self).__init__(
host=host_ip,
port=host_port,
server_username=username,
server_password=password,
api_retry_count=retry_count,
task_poll_interval=CONF.vmware.task_poll_interval,
scheme=scheme,
create_session=True,
cacert=cacert,
insecure=insecure,
pool_size=pool_size)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
return isinstance(module, vim.Vim)
def _call_method(self, module, method, *args, **kwargs):
"""Calls a method within the module specified with
args provided.
"""
if not self._is_vim_object(module):
return self.invoke_api(module, method, self.vim, *args, **kwargs)
else:
return self.invoke_api(module, method, *args, **kwargs)
def _wait_for_task(self, task_ref):
"""Return a Deferred that will give the result of the given task.
The task is polled until it completes.
"""
return self.wait_for_task(task_ref)
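# Usage sketch (illustrative only; it assumes valid CONF.vmware credentials and a
# reachable vCenter, so it is not executed here):
#   session = VMwareAPISession()
#   about = session._call_method(nova_vim_util, 'get_about_info')
# Because nova_vim_util is a plain helper module rather than a Vim object,
# _call_method routes the call through invoke_api(module, method, session.vim, ...).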
|
|
# -*- coding: utf-8 -*-
"""
This file is part of labella.py.
Python rewrite of the VPSC code included in Labella.js.
Originally modified from:
https://github.com/tgdwyer/WebCola/blob/master/WebCola/src/vpsc.ts
Author: G.J.J. van den Burg
License: Apache-2.0
"""
from sys import maxsize
class PositionStats(object):
def __init__(self, scale):
self.scale = scale
self.AB = 0
self.AD = 0
self.A2 = 0
def addVariable(self, v):
ai = self.scale / v.scale
bi = v.offset / v.scale
wi = v.weight
self.AB += wi * ai * bi
self.AD += wi * ai * v.desiredPosition
self.A2 += wi * ai * ai
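    # Each variable contributes w_i * (a_i * posn + b_i - d_i)**2 to the block cost,
    # where a_i = block_scale / v.scale, b_i = v.offset / v.scale and d_i is the
    # desired position. Setting the derivative with respect to posn to zero yields
    # the weighted least-squares optimum posn = (AD - AB) / A2, which getPosn() returns.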
def getPosn(self):
return (self.AD - self.AB) / self.A2
class Constraint(object):
def __init__(self, left, right, gap, equality=None):
if equality is None:
equality = False
self.left = left
self.right = right
self.gap = gap
self.equality = equality
self.active = False
self.unsatisfiable = False
def slack(self):
if self.unsatisfiable:
return maxsize
return (
self.right.scale * self.right.position()
- self.gap
- self.left.scale * self.left.position()
)
def __repr__(self):
s = "Constraint(left=%r, right=%r, gap=%r, equality=%r)" % (
self.left,
self.right,
self.gap,
self.equality,
)
return s
def __str__(self):
return repr(self)
class Variable(object):
def __init__(self, desiredPosition, weight=None, scale=None):
if weight is None:
weight = 1
if scale is None:
scale = 1
self.desiredPosition = desiredPosition
self.weight = weight
self.scale = scale
self.offset = 0
self.node = None
def dfdv(self):
return 2.0 * self.weight * (self.position() - self.desiredPosition)
def position(self):
return (
self.block.ps.scale * self.block.posn + self.offset
) / self.scale
def visitNeighbours(self, prev, f):
def ff(c, _next):
return c.active and prev != _next and f(c, _next)
for c in self.cOut:
ff(c, c.right)
for c in self.cIn:
ff(c, c.left)
def __repr__(self):
s = "Variable(desiredPos=%r, weight=%r, scale=%r, offset=%r)" % (
self.desiredPosition,
self.weight,
self.scale,
self.offset,
)
return s
def __str__(self):
return repr(self)
class Block(object):
def __init__(self, v):
self.vars = []
v.offset = 0
self.ps = PositionStats(v.scale)
self.addVariable(v)
def addVariable(self, v):
v.block = self
self.vars.append(v)
self.ps.addVariable(v)
self.posn = self.ps.getPosn()
def updateWeightedPosition(self):
self.ps.AB = 0
self.ps.AD = 0
self.ps.A2 = 0
for i in range(len(self.vars)):
self.ps.addVariable(self.vars[i])
self.posn = self.ps.getPosn()
def compute_lm(self, v, u, postAction):
dfdv = v.dfdv()
_self = self
def f(c, _next):
nonlocal dfdv
_dfdv = _self.compute_lm(_next, v, postAction)
if _next == c.right:
dfdv += _dfdv * c.left.scale
c.lm = _dfdv
else:
dfdv += _dfdv * c.right.scale
c.lm = -_dfdv
postAction(c)
v.visitNeighbours(u, f)
return dfdv / v.scale
def populateSplitBlock(self, v, prev):
_self = self
def f(c, _next):
_next.offset = v.offset
if _next == c.right:
_next.offset += c.gap
else:
_next.offset -= c.gap
_self.addVariable(_next)
_self.populateSplitBlock(_next, v)
v.visitNeighbours(prev, f)
    def traverse(self, visit, acc, v=None, prev=None):
        _self = self
        if not v:
            v = self.vars[0]
        def f(c, _next):
            acc.append(visit(c))
            _self.traverse(visit, acc, _next, v)
        v.visitNeighbours(prev, f)
def findMinLM(self):
m = None
def f(c):
nonlocal m
if not c.equality and (m is None or c.lm < m.lm):
m = c
self.compute_lm(self.vars[0], None, f)
return m
def findMinLMBetween(self, lv, rv):
def f(x):
pass
self.compute_lm(lv, None, f)
m = None
def f(c, _next):
nonlocal m
if (
not c.equality
and c.right == _next
and (m is None or c.lm < m.lm)
):
m = c
self.findPath(lv, None, rv, f)
return m
def findPath(self, v, prev, to, visit):
_self = self
endFound = False
def f(c, _next):
nonlocal endFound
if not endFound and (
_next == to or _self.findPath(_next, v, to, visit)
):
endFound = True
visit(c, _next)
v.visitNeighbours(prev, f)
return endFound
def isActiveDirectedPathBetween(self, u, v):
if u == v:
return True
for i in range(len(u.cOut) - 1, -1, -1):
c = u.cOut[i]
if c.active and self.isActiveDirectedPathBetween(c.right, v):
return True
return False
@classmethod
def split(cls, c):
c.active = False
return [
Block.createSplitBlock(c.left),
Block.createSplitBlock(c.right),
]
@classmethod
def createSplitBlock(cls, startVar):
b = Block(startVar)
b.populateSplitBlock(startVar, None)
return b
def splitBetween(self, vl, vr):
c = self.findMinLMBetween(vl, vr)
        if c is not None:
bs = Block.split(c)
return {"constraint": c, "lb": bs[0], "rb": bs[1]}
return None
def mergeAcross(self, b, c, dist):
c.active = True
for i in range(len(b.vars)):
v = b.vars[i]
v.offset += dist
self.addVariable(v)
self.posn = self.ps.getPosn()
def cost(self):
_sum = 0
for i in range(len(self.vars) - 1, -1, -1):
v = self.vars[i]
d = v.position() - v.desiredPosition
_sum += d * d * v.weight
return _sum
class Blocks(object):
def __init__(self, vs):
self.vs = vs
n = len(vs)
self._list = [None] * n
for i in range(len(vs) - 1, -1, -1):
b = Block(vs[i])
self._list[i] = b
b.blockInd = i
def cost(self):
_sum = 0
for i in range(len(self._list) - 1, -1, -1):
_sum += self._list[i].cost()
return _sum
def insert(self, b):
b.blockInd = len(self._list)
self._list.append(b)
def remove(self, b):
swapBlock = self._list[-1]
if not b == swapBlock:
self._list[b.blockInd] = swapBlock
swapBlock.blockInd = b.blockInd
self._list = self._list[:-1]
def merge(self, c):
l = c.left.block
r = c.right.block
dist = c.right.offset - c.left.offset - c.gap
if len(l.vars) < len(r.vars):
r.mergeAcross(l, c, dist)
self.remove(l)
else:
l.mergeAcross(r, c, -dist)
self.remove(r)
def forEach(self, f):
for b in self._list:
f(b)
def updateBlockPositions(self):
for b in self._list:
b.updateWeightedPosition()
def split(self, inactive):
self.updateBlockPositions()
for b in self._list:
v = b.findMinLM()
            if v is not None and v.lm < Solver.LAGRANGIAN_TOLERANCE:
b = v.left.block
newblocks = Block.split(v)
for nb in newblocks:
self.insert(nb)
self.remove(b)
inactive.append(v)
class Solver(object):
LAGRANGIAN_TOLERANCE = -1e-4
ZERO_UPPERBOUND = -1e-10
def __init__(self, vs, cs):
self.vs = vs
self.cs = cs
for v in vs:
v.cIn = []
v.cOut = []
for c in cs:
c.left.cOut.append(c)
c.right.cIn.append(c)
self.inactive = cs[:]
for c in self.inactive:
c.active = False
self.bs = None
def cost(self):
return self.bs.cost()
def setStartingPositions(self, ps):
self.inactive = self.cs[:]
for c in self.inactive:
c.active = False
self.bs = Blocks(self.vs)
        for i, b in enumerate(self.bs._list):
b.posn = ps[i]
def setDesiredPositions(self, ps):
for i, v in enumerate(self.vs):
v.desiredPosition = ps[i]
def mostViolated(self):
minSlack = maxsize
v = None
l = self.inactive
n = len(l)
deletePoint = n
for i in range(n):
c = l[i]
if c.unsatisfiable:
continue
slack = c.slack()
if c.equality or slack < minSlack:
minSlack = slack
v = c
deletePoint = i
if c.equality:
break
if deletePoint != n and (
minSlack < Solver.ZERO_UPPERBOUND and not v.active or v.equality
):
            l[deletePoint] = l[n - 1]
            del l[-1]
return v
def satisfy(self):
if self.bs is None:
self.bs = Blocks(self.vs)
self.bs.split(self.inactive)
v = self.mostViolated()
while (v) and (
v.equality or v.slack() < Solver.ZERO_UPPERBOUND and not v.active
):
lb = v.left.block
rb = v.right.block
if lb != rb:
self.bs.merge(v)
else:
if lb.isActiveDirectedPathBetween(v.right, v.left):
# Cycle found
v.unsatisfiable = True
v = self.mostViolated()
continue
split = lb.splitBetween(v.left, v.right)
                if split is not None:
self.bs.insert(split["lb"])
self.bs.insert(split["rb"])
self.bs.remove(lb)
self.inactive.append(split["constraint"])
else:
v.unsatisfiable = True
v = self.mostViolated()
continue
if v.slack() >= 0:
self.inactive.append(v)
else:
self.bs.merge(v)
def solve(self):
self.satisfy()
lastcost = maxsize
cost = self.bs.cost()
while abs(lastcost - cost) > 0.0001:
self.satisfy()
lastcost = cost
cost = self.bs.cost()
return cost
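# Minimal usage sketch (added for illustration; not part of the original labella.py
# sources). Three labels want positions 0, 1 and 2 but must keep pairwise gaps of at
# least 3, so the solver shifts the merged block to roughly -2, 1 and 4 at cost ~8.
if __name__ == "__main__":
    example_vars = [Variable(0), Variable(1), Variable(2)]
    example_cons = [Constraint(example_vars[0], example_vars[1], 3),
                    Constraint(example_vars[1], example_vars[2], 3)]
    example_cost = Solver(example_vars, example_cons).solve()
    print(example_cost, [round(v.position(), 3) for v in example_vars])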
|
|
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
import sys
import warnings
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, force_str
from django.utils.functional import allow_lazy
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils import six
from django.utils.six.moves.urllib.parse import parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit
from django.utils.text import normalize_newlines
from .html_parser import HTMLParser, HTMLParseError
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', '\'', '!']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(
r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|'
'<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(
r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x)
for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
"""
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type, SafeText)
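# For example, escape('<b>"A & B"</b>') returns '&lt;b&gt;&quot;A &amp; B&quot;&lt;/b&gt;'.
# The ampersand is replaced first so that the '&' characters introduced by the later
# entity substitutions are not escaped a second time.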
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type, SafeText)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in six.iteritems(kwargs))
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
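# For example, format_html('<b>{0}</b>', '<i>unsafe</i>') returns the safe string
# '<b>&lt;i&gt;unsafe&lt;/i&gt;</b>', whereas plain str.format would leave the nested
# markup unescaped.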
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
# The strict parameter was added in Python 3.2 with a default of True.
# The default changed to False in Python 3.3 and was deprecated.
if sys.version_info[:2] == (3, 2):
HTMLParser.__init__(self, strict=False)
else:
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
try:
s.feed(value)
except HTMLParseError:
return value
try:
s.close()
except (HTMLParseError, UnboundLocalError):
# UnboundLocalError because of http://bugs.python.org/issue17802
# on Python 3.2, triggered by strict=False mode of HTMLParser
return s.get_data() + s.rawdata
else:
return s.get_data()
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
while '<' in value and '>' in value:
new_value = _strip_once(value)
if new_value == value:
# _strip_once was not able to detect more tags
break
value = new_value
return value
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
warnings.warn(
"django.utils.html.remove_tags() and the removetags template filter "
"are deprecated. Consider using the bleach library instead.",
RemovedInDjango20Warning, stacklevel=3
)
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
warnings.warn(
"django.utils.html.strip_entities() is deprecated.",
RemovedInDjango20Warning, stacklevel=2
)
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
def unquote_quote(segment):
segment = unquote(force_str(segment))
# Tilde is part of RFC3986 Unreserved Characters
# http://tools.ietf.org/html/rfc3986#section-2.3
# See also http://bugs.python.org/issue16285
segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
return force_text(segment)
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
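# For illustration, smart_urlquote('http://example.com/a b?x=1 2') unquotes and then
# re-quotes each component, giving roughly 'http://example.com/a%20b?x=1+2' (the query
# string is rebuilt with urlencode, so spaces there become '+').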
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in the link text longer than this
limit will be truncated to trim_url_limit-3 characters and appended with
an ellipsis.
If nofollow is True, the links will get a rel="nofollow" attribute.
If autoescape is True, the link text and URLs will be autoescaped.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
def unescape(text, trail):
"""
If input URL is HTML-escaped, unescape it so as we can safely feed it to
smart_urlquote. For example:
            http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
"""
if not safe_input:
return text, text, trail
            unescaped = (text + trail).replace(
                '&amp;', '&').replace('&lt;', '<').replace(
                '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
# ';' in trail can be either trailing punctuation or end-of-entity marker
if unescaped.endswith(';'):
return text, unescaped[:-1], trail
else:
text += trail
return text, unescaped, ''
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote(middle_unescaped)
elif simple_url_2_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote('http://%s' % middle_unescaped)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
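# For example, urlize('Visit www.example.com today.') returns
# 'Visit <a href="http://www.example.com">www.example.com</a> today.', and passing
# nofollow=True adds rel="nofollow" to the generated anchor.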
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestStreamedResultSet(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner.streamed import StreamedResultSet
return StreamedResultSet
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
self.assertIs(streamed._response_iterator, iterator)
self.assertIsNone(streamed._source)
self.assertEqual(streamed.rows, [])
self.assertIsNone(streamed.metadata)
self.assertIsNone(streamed.stats)
self.assertIsNone(streamed.resume_token)
def test_ctor_w_source(self):
iterator = _MockCancellableIterator()
source = object()
streamed = self._make_one(iterator, source=source)
self.assertIs(streamed._response_iterator, iterator)
self.assertIs(streamed._source, source)
self.assertEqual(streamed.rows, [])
self.assertIsNone(streamed.metadata)
self.assertIsNone(streamed.stats)
self.assertIsNone(streamed.resume_token)
def test_fields_unset(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
with self.assertRaises(AttributeError):
streamed.fields
@staticmethod
def _make_scalar_field(name, type_):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
return StructType.Field(name=name, type=Type(code=type_))
@staticmethod
def _make_array_field(name, element_type_code=None, element_type=None):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
if element_type is None:
element_type = Type(code=element_type_code)
array_type = Type(
code='ARRAY', array_element_type=element_type)
return StructType.Field(name=name, type=array_type)
@staticmethod
def _make_struct_type(struct_type_fields):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
fields = [
StructType.Field(name=key, type=Type(code=value))
for key, value in struct_type_fields
]
struct_type = StructType(fields=fields)
return Type(code='STRUCT', struct_type=struct_type)
@staticmethod
def _make_value(value):
from google.cloud.spanner._helpers import _make_value_pb
return _make_value_pb(value)
@staticmethod
def _make_list_value(values=(), value_pbs=None):
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.cloud.spanner._helpers import _make_list_value_pb
if value_pbs is not None:
return Value(list_value=ListValue(values=value_pbs))
return Value(list_value=_make_list_value_pb(values))
@staticmethod
def _make_result_set_metadata(fields=(), transaction_id=None):
from google.cloud.proto.spanner.v1.result_set_pb2 import (
ResultSetMetadata)
metadata = ResultSetMetadata()
for field in fields:
metadata.row_type.fields.add().CopyFrom(field)
if transaction_id is not None:
metadata.transaction.id = transaction_id
return metadata
@staticmethod
def _make_result_set_stats(query_plan=None, **kw):
from google.cloud.proto.spanner.v1.result_set_pb2 import (
ResultSetStats)
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner._helpers import _make_value_pb
query_stats = Struct(fields={
key: _make_value_pb(value) for key, value in kw.items()})
return ResultSetStats(
query_plan=query_plan,
query_stats=query_stats,
)
@staticmethod
def _make_partial_result_set(
values, metadata=None, stats=None, chunked_value=False):
from google.cloud.proto.spanner.v1.result_set_pb2 import (
PartialResultSet)
return PartialResultSet(
values=values,
metadata=metadata,
stats=stats,
chunked_value=chunked_value,
)
def test_properties_set(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
]
metadata = streamed._metadata = self._make_result_set_metadata(FIELDS)
stats = streamed._stats = self._make_result_set_stats()
self.assertEqual(list(streamed.fields), FIELDS)
self.assertIs(streamed.metadata, metadata)
self.assertIs(streamed.stats, stats)
def test__merge_chunk_bool(self):
from google.cloud.spanner.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('registered_voter', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(True)
chunk = self._make_value(False)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def test__merge_chunk_int64(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('age', 'INT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(42)
chunk = self._make_value(13)
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, '4213')
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_float64_nan_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(u'Na')
chunk = self._make_value(u'N')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, u'NaN')
def test__merge_chunk_float64_w_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(3.14159)
chunk = self._make_value('')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.number_value, 3.14159)
def test__merge_chunk_float64_w_float64(self):
from google.cloud.spanner.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('weight', 'FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(3.14159)
chunk = self._make_value(2.71828)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def test__merge_chunk_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('name', 'STRING'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(u'phred')
chunk = self._make_value(u'wylma')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, u'phredwylma')
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_string_w_bytes(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('image', 'BYTES'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(
u'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA'
u'6fptVAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\n',
)
chunk = self._make_value(
u'B3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0FNUExF'
u'MG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n',
)
merged = streamed._merge_chunk(chunk)
self.assertEqual(
merged.string_value,
u'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA6fptVAAAACXBIWXMAAAsTAAAL'
u'EwEAmpwYAAAA\nB3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0'
u'FNUExFMG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n',
)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_bool(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_array_field('name', element_type_code='BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([True, True])
chunk = self._make_list_value([False, False, False])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value([True, True, False, False, False])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_int(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_array_field('name', element_type_code='INT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([0, 1, 2])
chunk = self._make_list_value([3, 4, 5])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value([0, 1, 23, 4, 5])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_float(self):
import math
PI = math.pi
EULER = math.e
SQRT_2 = math.sqrt(2.0)
LOG_10 = math.log(10)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_array_field('name', element_type_code='FLOAT64'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([PI, SQRT_2])
chunk = self._make_list_value(['', EULER, LOG_10])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value([PI, SQRT_2, EULER, LOG_10])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_array_field('name', element_type_code='STRING'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([u'A', u'B', u'C'])
chunk = self._make_list_value([None, u'D', u'E'])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value([u'A', u'B', u'C', None, u'D', u'E'])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_string_with_null(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_array_field('name', element_type_code='STRING'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value([u'A', u'B', u'C'])
chunk = self._make_list_value([u'D', u'E'])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value([u'A', u'B', u'CD', u'E'])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
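    # Taken together, the two array-of-string cases above show the chunking contract:
    # a NULL element at the start of the new chunk keeps the lists separate (plain
    # concatenation), while a non-NULL boundary element is merged with the last
    # pending element ('C' + 'D' -> 'CD').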
def test__merge_chunk_array_of_array_of_int(self):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
subarray_type = Type(
code='ARRAY', array_element_type=Type(code='INT64'))
array_type = Type(code='ARRAY', array_element_type=subarray_type)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
StructType.Field(name='loloi', type=array_type)
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value(value_pbs=[
self._make_list_value([0, 1]),
self._make_list_value([2]),
])
chunk = self._make_list_value(value_pbs=[
self._make_list_value([3]),
self._make_list_value([4, 5]),
])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value(value_pbs=[
self._make_list_value([0, 1]),
self._make_list_value([23]),
self._make_list_value([4, 5]),
])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_array_of_string(self):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
subarray_type = Type(
code='ARRAY', array_element_type=Type(code='STRING'))
array_type = Type(code='ARRAY', array_element_type=subarray_type)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
StructType.Field(name='lolos', type=array_type)
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_list_value(value_pbs=[
self._make_list_value([u'A', u'B']),
self._make_list_value([u'C']),
])
chunk = self._make_list_value(value_pbs=[
self._make_list_value([u'D']),
self._make_list_value([u'E', u'F']),
])
merged = streamed._merge_chunk(chunk)
expected = self._make_list_value(value_pbs=[
self._make_list_value([u'A', u'B']),
self._make_list_value([u'CD']),
self._make_list_value([u'E', u'F']),
])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_struct(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
struct_type = self._make_struct_type([
('name', 'STRING'),
('age', 'INT64'),
])
FIELDS = [
self._make_array_field('test', element_type=struct_type),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
partial = self._make_list_value([u'Phred '])
streamed._pending_chunk = self._make_list_value(value_pbs=[partial])
rest = self._make_list_value([u'Phlyntstone', 31])
chunk = self._make_list_value(value_pbs=[rest])
merged = streamed._merge_chunk(chunk)
struct = self._make_list_value([u'Phred Phlyntstone', 31])
expected = self._make_list_value(value_pbs=[struct])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_struct_unmergeable(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
struct_type = self._make_struct_type([
('name', 'STRING'),
('registered', 'BOOL'),
('voted', 'BOOL'),
])
FIELDS = [
self._make_array_field('test', element_type=struct_type),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
partial = self._make_list_value([u'Phred Phlyntstone', True])
streamed._pending_chunk = self._make_list_value(value_pbs=[partial])
rest = self._make_list_value([True])
chunk = self._make_list_value(value_pbs=[rest])
merged = streamed._merge_chunk(chunk)
struct = self._make_list_value([u'Phred Phlyntstone', True, True])
expected = self._make_list_value(value_pbs=[struct])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test_merge_values_empty_and_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._current_row = []
streamed._merge_values([])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
def test_merge_values_empty_and_partial(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._make_value(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
def test_merge_values_empty_and_filled(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BARE = [u'Phred Phlyntstone', 42, True]
VALUES = [self._make_value(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [BARE])
self.assertEqual(streamed._current_row, [])
def test_merge_values_empty_and_filled_plus(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
VALUES = [self._make_value(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [BARE[0:3], BARE[3:6]])
self.assertEqual(streamed._current_row, BARE[6:])
def test_merge_values_partial_and_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BEFORE = [
u'Phred Phlyntstone'
]
streamed._current_row[:] = BEFORE
streamed._merge_values([])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BEFORE)
def test_merge_values_partial_and_partial(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BEFORE = [u'Phred Phlyntstone']
streamed._current_row[:] = BEFORE
MERGED = [42]
TO_MERGE = [self._make_value(item) for item in MERGED]
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BEFORE + MERGED)
def test_merge_values_partial_and_filled(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BEFORE = [
u'Phred Phlyntstone'
]
streamed._current_row[:] = BEFORE
MERGED = [42, True]
TO_MERGE = [self._make_value(item) for item in MERGED]
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [BEFORE + MERGED])
self.assertEqual(streamed._current_row, [])
def test_merge_values_partial_and_filled_plus(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
streamed._metadata = self._make_result_set_metadata(FIELDS)
BEFORE = [
self._make_value(u'Phred Phlyntstone')
]
streamed._current_row[:] = BEFORE
MERGED = [
42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
TO_MERGE = [self._make_value(item) for item in MERGED]
VALUES = BEFORE + MERGED
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [VALUES[0:3], VALUES[3:6]])
self.assertEqual(streamed._current_row, VALUES[6:])
def test_one_or_none_no_value(self):
streamed = self._make_one(_MockCancellableIterator())
with mock.patch.object(streamed, 'consume_next') as consume_next:
consume_next.side_effect = StopIteration
self.assertIsNone(streamed.one_or_none())
def test_one_or_none_single_value(self):
streamed = self._make_one(_MockCancellableIterator())
streamed._rows = ['foo']
with mock.patch.object(streamed, 'consume_next') as consume_next:
consume_next.side_effect = StopIteration
self.assertEqual(streamed.one_or_none(), 'foo')
def test_one_or_none_multiple_values(self):
streamed = self._make_one(_MockCancellableIterator())
streamed._rows = ['foo', 'bar']
with self.assertRaises(ValueError):
streamed.one_or_none()
def test_one_or_none_consumed_stream(self):
streamed = self._make_one(_MockCancellableIterator())
streamed._metadata = object()
with self.assertRaises(RuntimeError):
streamed.one_or_none()
def test_one_single_value(self):
streamed = self._make_one(_MockCancellableIterator())
streamed._rows = ['foo']
with mock.patch.object(streamed, 'consume_next') as consume_next:
consume_next.side_effect = StopIteration
self.assertEqual(streamed.one(), 'foo')
def test_one_no_value(self):
from google.cloud import exceptions
iterator = _MockCancellableIterator(['foo'])
streamed = self._make_one(iterator)
with mock.patch.object(streamed, 'consume_next') as consume_next:
consume_next.side_effect = StopIteration
with self.assertRaises(exceptions.NotFound):
streamed.one()
def test_consume_next_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
with self.assertRaises(StopIteration):
streamed.consume_next()
def test_consume_next_first_set_partial(self):
TXN_ID = b'DEADBEEF'
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(
FIELDS, transaction_id=TXN_ID)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
source = mock.Mock(_transaction_id=None, spec=['_transaction_id'])
streamed = self._make_one(iterator, source=source)
streamed.consume_next()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertEqual(streamed.metadata, metadata)
self.assertEqual(streamed.resume_token, result_set.resume_token)
self.assertEqual(source._transaction_id, TXN_ID)
def test_consume_next_first_set_partial_existing_txn_id(self):
TXN_ID = b'DEADBEEF'
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(
FIELDS, transaction_id=b'')
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
source = mock.Mock(_transaction_id=TXN_ID, spec=['_transaction_id'])
streamed = self._make_one(iterator, source=source)
streamed.consume_next()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertEqual(streamed.metadata, metadata)
self.assertEqual(streamed.resume_token, result_set.resume_token)
self.assertEqual(source._transaction_id, TXN_ID)
def test_consume_next_w_partial_result(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
VALUES = [
self._make_value(u'Phred '),
]
result_set = self._make_partial_result_set(VALUES, chunked_value=True)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed.consume_next()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, VALUES[0])
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_next_w_pending_chunk(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
BARE = [
u'Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = self._make_result_set_metadata(FIELDS)
streamed._pending_chunk = self._make_value(u'Phred ')
streamed.consume_next()
self.assertEqual(streamed.rows, [
[u'Phred Phlyntstone', BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
])
self.assertEqual(streamed._current_row, [BARE[6]])
self.assertIsNone(streamed._pending_chunk)
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_next_last_set(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
stats = self._make_result_set_stats(
rows_returned="1",
elapsed_time="1.23 secs",
cpu_time="0.98 secs",
)
BARE = [u'Phred Phlyntstone', 42, True]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES, stats=stats)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = metadata
streamed.consume_next()
self.assertEqual(streamed.rows, [BARE])
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._stats, stats)
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_all_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
streamed.consume_all()
def test_consume_all_one_result_set_partial(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed.consume_all()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertEqual(streamed.metadata, metadata)
def test_consume_all_multiple_result_sets_filled(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._make_value(bare) for bare in BARE]
result_set1 = self._make_partial_result_set(
VALUES[:4], metadata=metadata)
result_set2 = self._make_partial_result_set(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
streamed.consume_all()
self.assertEqual(streamed.rows, [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
def test___iter___empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [])
def test___iter___one_result_set_partial(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._make_value(bare) for bare in BARE]
result_set = self._make_partial_result_set(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertEqual(streamed.metadata, metadata)
def test___iter___multiple_result_sets_filled(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._make_value(bare) for bare in BARE]
result_set1 = self._make_partial_result_set(
VALUES[:4], metadata=metadata)
result_set2 = self._make_partial_result_set(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
def test___iter___w_existing_rows_read(self):
FIELDS = [
self._make_scalar_field('full_name', 'STRING'),
self._make_scalar_field('age', 'INT64'),
self._make_scalar_field('married', 'BOOL'),
]
metadata = self._make_result_set_metadata(FIELDS)
ALREADY = [
[u'Pebbylz Phlyntstone', 4, False],
[u'Dino Rhubble', 4, False],
]
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._make_value(bare) for bare in BARE]
result_set1 = self._make_partial_result_set(
VALUES[:4], metadata=metadata)
result_set2 = self._make_partial_result_set(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
streamed._rows[:] = ALREADY
found = list(streamed)
self.assertEqual(found, ALREADY + [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
class _MockCancellableIterator(object):
cancel_calls = 0
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
def __next__(self): # pragma: NO COVER Py3k
return self.next()
class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase):
_json_tests = None
def _getTargetClass(self):
from google.cloud.spanner.streamed import StreamedResultSet
return StreamedResultSet
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _load_json_test(self, test_name):
import os
if self.__class__._json_tests is None:
dirname = os.path.dirname(__file__)
filename = os.path.join(
dirname, 'streaming-read-acceptance-test.json')
raw = _parse_streaming_read_acceptance_tests(filename)
tests = self.__class__._json_tests = {}
for (name, partial_result_sets, results) in raw:
tests[name] = partial_result_sets, results
return self.__class__._json_tests[test_name]
# Non-error cases
def _match_results(self, testcase_name, assert_equality=None):
partial_result_sets, expected = self._load_json_test(testcase_name)
iterator = _MockCancellableIterator(*partial_result_sets)
partial = self._make_one(iterator)
partial.consume_all()
if assert_equality is not None:
assert_equality(partial.rows, expected)
else:
self.assertEqual(partial.rows, expected)
def test_basic(self):
self._match_results('Basic Test')
def test_string_chunking(self):
self._match_results('String Chunking Test')
def test_string_array_chunking(self):
self._match_results('String Array Chunking Test')
def test_string_array_chunking_with_nulls(self):
self._match_results('String Array Chunking Test With Nulls')
def test_string_array_chunking_with_empty_strings(self):
self._match_results('String Array Chunking Test With Empty Strings')
def test_string_array_chunking_with_one_large_string(self):
self._match_results('String Array Chunking Test With One Large String')
def test_int64_array_chunking(self):
self._match_results('INT64 Array Chunking Test')
def test_float64_array_chunking(self):
import math
def assert_float_equality(lhs, rhs):
# NaN, +Inf, and -Inf can't be tested for equality
if lhs is None:
self.assertIsNone(rhs)
elif math.isnan(lhs):
self.assertTrue(math.isnan(rhs))
elif math.isinf(lhs):
self.assertTrue(math.isinf(rhs))
                # but +Inf and -Inf can still be compared by sign
self.assertTrue((lhs > 0) == (rhs > 0))
else:
self.assertEqual(lhs, rhs)
def assert_rows_equality(lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
for l_rows, r_rows in zip(lhs, rhs):
self.assertEqual(len(l_rows), len(r_rows))
for l_row, r_row in zip(l_rows, r_rows):
self.assertEqual(len(l_row), len(r_row))
for l_cell, r_cell in zip(l_row, r_row):
assert_float_equality(l_cell, r_cell)
self._match_results(
'FLOAT64 Array Chunking Test', assert_rows_equality)
def test_struct_array_chunking(self):
self._match_results('Struct Array Chunking Test')
def test_nested_struct_array(self):
self._match_results('Nested Struct Array Test')
def test_nested_struct_array_chunking(self):
self._match_results('Nested Struct Array Chunking Test')
def test_struct_array_and_string_chunking(self):
self._match_results('Struct Array And String Chunking Test')
def test_multiple_row_single_chunk(self):
self._match_results('Multiple Row Single Chunk')
def test_multiple_row_multiple_chunks(self):
self._match_results('Multiple Row Multiple Chunks')
def test_multiple_row_chunks_non_chunks_interleaved(self):
self._match_results('Multiple Row Chunks/Non Chunks Interleaved')
def _generate_partial_result_sets(prs_text_pbs):
from google.protobuf.json_format import Parse
from google.cloud.proto.spanner.v1.result_set_pb2 import PartialResultSet
partial_result_sets = []
for prs_text_pb in prs_text_pbs:
prs = PartialResultSet()
partial_result_sets.append(Parse(prs_text_pb, prs))
return partial_result_sets
def _normalize_int_array(cell):
normalized = []
for subcell in cell:
if subcell is not None:
subcell = int(subcell)
normalized.append(subcell)
return normalized
def _normalize_float(cell):
if cell == u'Infinity':
return float('inf')
if cell == u'-Infinity':
return float('-inf')
if cell == u'NaN':
return float('nan')
if cell is not None:
return float(cell)
def _normalize_results(rows_data, fields):
"""Helper for _parse_streaming_read_acceptance_tests"""
from google.cloud.proto.spanner.v1 import type_pb2
normalized = []
for row_data in rows_data:
row = []
assert len(row_data) == len(fields)
for cell, field in zip(row_data, fields):
if field.type.code == type_pb2.INT64:
cell = int(cell)
            elif field.type.code == type_pb2.FLOAT64:
cell = _normalize_float(cell)
elif field.type.code == type_pb2.BYTES:
cell = cell.encode('utf8')
elif field.type.code == type_pb2.ARRAY:
if field.type.array_element_type.code == type_pb2.INT64:
cell = _normalize_int_array(cell)
elif field.type.array_element_type.code == type_pb2.FLOAT64:
cell = [_normalize_float(subcell) for subcell in cell]
row.append(cell)
normalized.append(row)
return normalized
def _parse_streaming_read_acceptance_tests(filename):
"""Parse acceptance tests from JSON
See streaming-read-acceptance-test.json
"""
import json
with open(filename) as json_file:
test_json = json.load(json_file)
for test in test_json['tests']:
name = test['name']
partial_result_sets = _generate_partial_result_sets(test['chunks'])
fields = partial_result_sets[0].metadata.row_type.fields
result = _normalize_results(test['result']['value'], fields)
yield name, partial_result_sets, result
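# A minimal sketch of the JSON layout that _parse_streaming_read_acceptance_tests
# expects, inferred from the lookups above ('tests', 'name', 'chunks',
# 'result' -> 'value'); the concrete values shown here are illustrative only:
#
#   {
#     "tests": [
#       {
#         "name": "Basic Test",
#         "chunks": ["<PartialResultSet as a JSON/text proto string>", "..."],
#         "result": {"value": [["row-0-col-0", "row-0-col-1"]]}
#       }
#     ]
#   }
#
# Each entry in "chunks" is parsed into a PartialResultSet via
# google.protobuf.json_format.Parse, and "result"/"value" is normalized against
# the row_type fields of the first chunk's metadata before comparison.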
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util
from tensorflow.python.estimator.export.export import build_all_signature_defs
from tensorflow.python.estimator.export.export import get_temp_export_dir
from tensorflow.python.estimator.export.export import get_timestamped_export_dir
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'self', 'config'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
  The `config` argument can be passed a `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
`Estimator`. Not passing config means that defaults useful for local execution
are used. `Estimator` makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
"""
def __init__(self, model_fn, model_dir=None, config=None, params=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
            prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            configuring Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
Estimator._assert_members_are_not_overridden(self)
if config is None:
self._config = run_config.RunConfig()
logging.info('Using default config.')
else:
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of RunConfig, but provided %s.' %
config)
self._config = config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(alanyee): remove this suppression after it is no longer needed
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
self._device_fn = _get_replica_device_setter(self._config)
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {})
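  # A minimal, commented-out sketch of a `model_fn` matching the signature
  # documented in the constructor above. The layer, loss, and optimizer choices
  # are illustrative assumptions (not part of this module), and the sketch only
  # covers the TRAIN/EVAL case where `labels` is not None:
  #
  #   def my_model_fn(features, labels, mode, params, config):
  #     logits = tf.layers.dense(features['x'], params['n_classes'])
  #     loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
  #     train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
  #         loss, global_step=tf.train.get_global_step())
  #     return model_fn_lib.EstimatorSpec(
  #         mode=mode, loss=loss, train_op=train_op,
  #         predictions={'logits': logits})
  #
  #   estimator = Estimator(model_fn=my_model_fn, params={'n_classes': 3})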
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string or a list of string, name of the tensor.
Returns:
Numpy array - value of the tensor.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
return training.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
return [name for name, _ in training.list_variables(self.model_dir)]
def latest_checkpoint(self):
"""Finds the filename of latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
return saver.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data input_fn.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
steps: Number of steps for which to train model. If `None`, train forever
or train until input_fn generates the `OutOfRange` error or
        `StopIteration` exception. 'steps' works incrementally. If you call
        train(steps=10) twice, training occurs for 20 steps in total. If
        `OutOfRange` or `StopIteration` occurs in the middle, training stops
        before 20 steps. If you don't want the incremental behavior, please
        set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until input_fn generates the `OutOfRange` error
or `StopIteration` exception. If set, `steps` must be `None`. If
`OutOfRange` or `StopIteration` occurs in the middle, training stops
before `max_steps` steps.
        Two calls to `train(steps=100)` mean 200 training
        iterations. On the other hand, two calls to `train(max_steps=100)` mean
        that the second call will not do any iterations since the first call did
        all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps` is <= 0.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already been reached.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
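  # A commented-out usage sketch of the `steps` / `max_steps` semantics
  # described in the docstring above; `est` and `input_fn` are placeholders:
  #
  #   est.train(input_fn, steps=10)       # runs 10 training steps
  #   est.train(input_fn, steps=10)       # runs 10 more steps (20 in total)
  #
  #   est.train(input_fn, max_steps=100)  # trains until global step 100
  #   est.train(input_fn, max_steps=100)  # no-op: global step is already 100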
def _convert_train_steps_to_hooks(self, steps, max_steps):
if steps is not None or max_steps is not None:
return [training.StopAtStepHook(steps, max_steps)]
else:
return []
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data input_fn.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or
`SparseTensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
        separately in TensorBoard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
Raises:
ValueError: If `steps <= 0`.
ValueError: If no model has been trained, namely `model_dir`, or the
given `checkpoint_path` is empty.
"""
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_eval_steps_to_hooks(steps))
return self._evaluate_model(
input_fn=input_fn,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
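  # A commented-out sketch of running evaluation, per the docstring above;
  # `est` and `eval_input_fn` are placeholders. The 'loss' entry is the default
  # metric added in `_evaluate_model`; 'global_step' is always included:
  #
  #   metrics = est.evaluate(eval_input_fn, steps=100, name='validation')
  #   print(metrics['loss'], metrics['global_step'])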
def _convert_eval_steps_to_hooks(self, steps):
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None):
"""Yields predictions for given features.
Args:
input_fn: Input function returning features which is a dictionary of
string feature name to `Tensor` or `SparseTensor`. If it returns a
tuple, first item is extracted as features. Prediction continues until
`input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
predict_keys: list of `str`, name of the keys to predict. It is used if
the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
then rest of the predictions will be filtered from the dictionary. If
`None`, returns all.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used.
Yields:
Evaluated values of `predictions` tensors.
Raises:
ValueError: Could not find a trained model in model_dir.
      ValueError: if batch lengths of predictions are not the same.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`EstimatorSpec.predictions` is not a `dict`.
"""
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError('Could not find trained model in model_dir: {}.'.format(
self._model_dir))
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(g)
features = self._get_features_from_input_fn(
input_fn, model_fn_lib.ModeKeys.PREDICT)
estimator_spec = self._call_model_fn(
features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
with training.MonitoredSession(
session_creator=training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
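  # A commented-out sketch of consuming `predict`, per the docstring above;
  # `est`, `pred_input_fn`, and the 'probabilities' key are placeholders (the
  # keys come from the `EstimatorSpec.predictions` dict returned by the model_fn):
  #
  #   for pred in est.predict(pred_input_fn, predict_keys=['probabilities']):
  #     print(pred['probabilities'])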
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
allowed_overrides = set(['_call_input_fn', '_create_global_step',
'_convert_train_steps_to_hooks',
'_convert_eval_steps_to_hooks'])
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
This method builds a new graph by first calling the
serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
this `Estimator`'s model_fn to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given export_dir_base, and writes
a `SavedModel` into it containing a single `MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the export_outputs dict returned from the model_fn, named using
the same keys. One of these keys is always
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`ExportOutput`s, and the inputs are always the input receivers provided by
the serving_input_receiver_fn.
    Extra assets may be written into the SavedModel via the assets_extra
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if no serving_input_receiver_fn is provided, no export_outputs
are provided, or no checkpoint can be found.
"""
if serving_input_receiver_fn is None:
raise ValueError('serving_input_receiver_fn must be defined.')
with ops.Graph().as_default() as g:
self._create_and_assert_global_step(g)
random_seed.set_random_seed(self._config.tf_random_seed)
serving_input_receiver = serving_input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=serving_input_receiver.features,
labels=None,
mode=model_fn_lib.ModeKeys.PREDICT,
config=self.config)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = build_all_signature_defs(
serving_input_receiver.receiver_tensors,
estimator_spec.export_outputs,
serving_input_receiver.receiver_tensors_alternatives)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = get_timestamped_export_dir(export_dir_base)
temp_export_dir = get_temp_export_dir(export_dir)
# TODO(soergel): Consider whether MonitoredSession makes sense here
with tf_session.Session() as session:
saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# TODO(b/36111876): replace legacy_init_op with main_op mechanism
# pylint: disable=protected-access
local_init_op = (
estimator_spec.scaffold.local_init_op or
monitored_session.Scaffold._default_local_init_op())
# pylint: enable=protected-access
# Perform the export
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=local_init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
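  # A commented-out sketch of exporting a SavedModel, per the docstring above.
  # The feature spec, placeholder shape, and asset paths are illustrative
  # assumptions, not fixed API requirements:
  #
  #   def my_serving_input_receiver_fn():
  #     inputs = {'x': tf.placeholder(tf.float32, shape=[None, 4])}
  #     return tf.estimator.export.ServingInputReceiver(inputs, inputs)
  #
  #   export_dir = est.export_savedmodel(
  #       '/tmp/exports', my_serving_input_receiver_fn,
  #       assets_extra={'my_asset_file.txt': '/path/to/my_asset_file.txt'})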
def _get_features_from_input_fn(self, input_fn, mode):
result = self._call_input_fn(input_fn, mode)
if not ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
logging.warning('Input graph does not contain a QueueRunner. '
'That means predict yields forever. '
'This is probably a mistake.')
if isinstance(result, (list, tuple)):
return result[0]
return result
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
result = self._call_input_fn(input_fn, mode)
if isinstance(result, (list, tuple)):
if len(result) != 2:
raise ValueError(
            'input_fn should return (features, labels) as a len 2 tuple.')
return result
return result, None
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
        raise ValueError('Batch length of predictions should be same. %s has '
                         'different batch length than others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _create_global_step(self, graph):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection ${tf.GraphKeys.GLOBAL_STEP}.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
return training.create_global_step(graph)
def _create_and_assert_global_step(self, graph):
"""Creates and asserts properties of the global step.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
step = self._create_global_step(graph)
assert step == training.get_global_step()
assert step.dtype.is_integer
return step
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments.
"""
del mode # unused
input_fn_args = util.fn_args(input_fn)
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params
if 'config' in input_fn_args:
kwargs['config'] = self.config
with ops.device('/cpu:0'):
return input_fn(**kwargs)
def _call_model_fn(self, features, labels, mode, config):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
config: RunConfig
Returns:
An `EstimatorSpec` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = config
model_fn_results = self._model_fn(features=features, **kwargs)
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
def _train_model(self, input_fn, hooks, saving_listeners):
worker_hooks = []
with ops.Graph().as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
global_step_read_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN)
with ops.control_dependencies([global_step_read_tensor]):
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
# Check if the user created a loss summary, and add one if they didn't.
# We assume here that the summary is called 'loss'. If it is not, we will
# make another one with the name 'loss' to ensure it shows up in the right
# graph in TensorBoard.
if not any([x.op.name == 'loss'
for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
summary.scalar('loss', estimator_spec.loss)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
worker_hooks.extend(hooks)
worker_hooks.extend([
training.NanTensorHook(estimator_spec.loss),
training.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=100)
])
worker_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
training.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
saver_hooks = [
h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
if not saver_hooks:
chief_hooks = [
training.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold)
]
saver_hooks = [chief_hooks[0]]
if saving_listeners:
if not saver_hooks:
raise ValueError(
'There should be a CheckpointSaverHook to use saving_listeners. '
'Please set one of the RunConfig.save_checkpoints_steps or '
'RunConfig.save_checkpoints_secs.')
else:
          # It is expected to have one CheckpointSaverHook. If multiple, we pick
          # up the first one to add the listeners.
saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=(
tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config,
log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
return loss
def _evaluate_model(self,
input_fn,
hooks=None,
checkpoint_path=None,
name=''):
"""Evaluates the model using the training.evaluation library."""
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise ValueError('Could not find trained model in model_dir: {}.'.
format(self._model_dir))
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.EVAL)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, self.config)
if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' % (
model_fn_lib.LOSS_METRIC_KEY) +
'already defines a default metric with the same name.')
estimator_spec.eval_metric_ops[
model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(
estimator_spec.eval_metric_ops)
if ops.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
all_hooks = list(hooks or [])
all_hooks.extend(list(estimator_spec.evaluation_hooks or []))
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=estimator_spec.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=all_hooks,
config=self._session_config)
_write_dict_to_summary(
output_dir=eval_dir,
dictionary=eval_results,
current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
return eval_results
def _check_checkpoint_available(model_dir):
latest_path = saver.latest_checkpoint(model_dir)
if not latest_path:
raise ValueError(
'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are SessionRunHook, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, training.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
`Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the
  distribution-related arguments such as the number of ps_replicas based on the
  given config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _verify_model_fn_args(model_fn, params):
"""Verifies model fn arguments."""
args = set(util.fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' % (model_fn,
params))
if params is None and 'params' in args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
if tf_inspect.ismethod(model_fn):
if 'self' in args:
args.remove('self')
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
    raise ValueError('model_fn (%s) has the following unexpected args: %s' %
(model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary)))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
|
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Tests for L{wokkel.muc}
"""
from __future__ import division, absolute_import
from datetime import datetime
from dateutil.tz import tzutc
from zope.interface import verify
from twisted.trial import unittest
from twisted.internet import defer, task
from twisted.python.compat import iteritems, unicode
from twisted.words.xish import domish, xpath
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.error import StanzaError
from twisted.words.protocols.jabber.xmlstream import TimeoutError, toResponse
from wokkel import data_form, delay, iwokkel, muc
from wokkel.generic import parseXml
from wokkel.test.helpers import TestableStreamManager
NS_MUC_ADMIN = 'http://jabber.org/protocol/muc#admin'
def calledAsync(fn):
"""
Function wrapper that fires a deferred upon calling the given function.
"""
d = defer.Deferred()
def func(*args, **kwargs):
try:
result = fn(*args, **kwargs)
except:
d.errback()
else:
d.callback(result)
return d, func
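# A short usage sketch of calledAsync, mirroring how the tests below use it to
# wait on a protocol callback:
#
#   d, self.protocol.groupChatReceived = calledAsync(groupChatReceived)
#   self.stub.send(parseXml(xml))
#   return d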
class StatusCodeTest(unittest.TestCase):
"""
Tests for L{muc.STATUS_CODE}.
"""
def test_lookupByValue(self):
"""
The registered MUC status codes map to STATUS_CODE value constants.
Note: the identifiers used in the dictionary of status codes are
borrowed from U{XEP-0306<http://xmpp.org/extensions/xep-0306.html>}
that defines Extensible Status Conditions for Multi-User Chat. If this
specification is implemented itself, the dictionary could move there.
"""
codes = {
100: 'realjid-public',
101: 'affiliation-changed',
102: 'unavailable-shown',
103: 'unavailable-not-shown',
104: 'configuration-changed',
110: 'self-presence',
170: 'logging-enabled',
171: 'logging-disabled',
172: 'non-anonymous',
173: 'semi-anonymous',
174: 'fully-anonymous',
201: 'room-created',
210: 'nick-assigned',
301: 'banned',
303: 'new-nick',
307: 'kicked',
321: 'removed-affiliation',
322: 'removed-membership',
332: 'removed-shutdown',
}
for code, condition in iteritems(codes):
constantName = condition.replace('-', '_').upper()
self.assertEqual(getattr(muc.STATUS_CODE, constantName),
muc.STATUS_CODE.lookupByValue(code))
class StatusesTest(unittest.TestCase):
"""
Tests for L{muc.Statuses}.
"""
def setUp(self):
self.mucStatuses = muc.Statuses()
self.mucStatuses.add(muc.STATUS_CODE.SELF_PRESENCE)
self.mucStatuses.add(muc.STATUS_CODE.ROOM_CREATED)
def test_interface(self):
"""
Instances of L{Statuses} provide L{iwokkel.IMUCStatuses}.
"""
verify.verifyObject(iwokkel.IMUCStatuses, self.mucStatuses)
def test_contains(self):
"""
        The statuses contained are 'in' the container.
"""
self.assertIn(muc.STATUS_CODE.SELF_PRESENCE, self.mucStatuses)
self.assertIn(muc.STATUS_CODE.ROOM_CREATED, self.mucStatuses)
self.assertNotIn(muc.STATUS_CODE.NON_ANONYMOUS, self.mucStatuses)
def test_iter(self):
"""
All statuses can be iterated over.
"""
statuses = set()
for status in self.mucStatuses:
statuses.add(status)
self.assertEqual(set([muc.STATUS_CODE.SELF_PRESENCE,
muc.STATUS_CODE.ROOM_CREATED]), statuses)
def test_len(self):
"""
The number of items in this container is returned by C{__len__}.
"""
self.assertEqual(2, len(self.mucStatuses))
class GroupChatTest(unittest.TestCase):
"""
Tests for L{muc.GroupChat}.
"""
def test_toElementDelay(self):
"""
If the delay attribute is set, toElement has it rendered.
"""
message = muc.GroupChat()
message.delay = delay.Delay(stamp=datetime(2002, 10, 13, 23, 58, 37,
tzinfo=tzutc()))
element = message.toElement()
query = "/message/delay[@xmlns='%s']" % (delay.NS_DELAY,)
nodes = xpath.queryForNodes(query, element)
self.assertNotIdentical(None, nodes, "Missing delay element")
def test_toElementDelayLegacy(self):
"""
If legacy delay is requested, the legacy format is rendered.
"""
message = muc.GroupChat()
message.delay = delay.Delay(stamp=datetime(2002, 10, 13, 23, 58, 37,
tzinfo=tzutc()))
element = message.toElement(legacyDelay=True)
query = "/message/x[@xmlns='%s']" % (delay.NS_JABBER_DELAY,)
nodes = xpath.queryForNodes(query, element)
self.assertNotIdentical(None, nodes, "Missing legacy delay element")
class HistoryOptionsTest(unittest.TestCase):
"""
    Tests for L{muc.HistoryOptions}.
"""
def test_toElement(self):
"""
toElement renders the history element in the right namespace.
"""
history = muc.HistoryOptions()
element = history.toElement()
self.assertEqual(muc.NS_MUC, element.uri)
self.assertEqual('history', element.name)
def test_toElementMaxStanzas(self):
"""
If C{maxStanzas} is set, the element has the attribute C{'maxstanzas'}.
"""
history = muc.HistoryOptions(maxStanzas=10)
element = history.toElement()
self.assertEqual(u'10', element.getAttribute('maxstanzas'))
def test_toElementSince(self):
"""
If C{since} is set, the attribute C{'since'} has a rendered timestamp.
"""
history = muc.HistoryOptions(since=datetime(2002, 10, 13, 23, 58, 37,
tzinfo=tzutc()))
element = history.toElement()
self.assertEqual(u'2002-10-13T23:58:37Z',
element.getAttribute('since'))
class UserPresenceTest(unittest.TestCase):
"""
Tests for L{muc.UserPresence}.
"""
def test_fromElementNoUserElement(self):
"""
Without user element, all associated attributes are None.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertIdentical(None, presence.affiliation)
self.assertIdentical(None, presence.role)
self.assertIdentical(None, presence.entity)
self.assertIdentical(None, presence.nick)
self.assertEqual(0, len(presence.mucStatuses))
def test_fromElementUnknownChild(self):
"""
Unknown child elements are ignored.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<status xmlns='myns' code='110'/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertEqual(0, len(presence.mucStatuses))
def test_fromElementStatusOne(self):
"""
Status codes are extracted.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
<status code='110'/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertIn(muc.STATUS_CODE.SELF_PRESENCE, presence.mucStatuses)
def test_fromElementStatusMultiple(self):
"""
Multiple status codes are all extracted.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
<status code='100'/>
<status code='110'/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertIn(muc.STATUS_CODE.SELF_PRESENCE, presence.mucStatuses)
self.assertIn(muc.STATUS_CODE.REALJID_PUBLIC, presence.mucStatuses)
def test_fromElementStatusEmpty(self):
"""
Empty status elements are ignored.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
<status/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertEqual(0, len(presence.mucStatuses))
def test_fromElementStatusBad(self):
"""
Bad status codes are ignored.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
<status code="badvalue"/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertEqual(0, len(presence.mucStatuses))
def test_fromElementStatusUnknown(self):
"""
Unknown status codes are not recorded in C{mucStatuses}.
"""
xml = """
<presence from='[email protected]/thirdwitch'
id='026B3509-2CCE-4D69-96D6-25F41FFDC408'
to='[email protected]/pda'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
<status code="999"/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertEqual(0, len(presence.mucStatuses))
def test_fromElementItem(self):
"""
Item attributes are parsed properly.
"""
xml = """
<presence from='[email protected]/thirdwitch'
to='[email protected]/desktop'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member'
jid='[email protected]/pda'
role='participant'
nick='thirdwitch'/>
</x>
</presence>
"""
element = parseXml(xml)
presence = muc.UserPresence.fromElement(element)
self.assertEqual(u'member', presence.affiliation)
self.assertEqual(u'participant', presence.role)
self.assertEqual(JID('[email protected]/pda'), presence.entity)
self.assertEqual(u'thirdwitch', presence.nick)
class MUCClientProtocolTest(unittest.TestCase):
"""
Tests for L{muc.MUCClientProtocol}.
"""
def setUp(self):
self.clock = task.Clock()
self.sessionManager = TestableStreamManager(reactor=self.clock)
self.stub = self.sessionManager.stub
self.protocol = muc.MUCClientProtocol(reactor=self.clock)
self.protocol.setHandlerParent(self.sessionManager)
self.roomIdentifier = 'test'
self.service = 'conference.example.org'
self.nick = 'Nick'
self.occupantJID = JID(tuple=(self.roomIdentifier,
self.service,
self.nick))
self.roomJID = self.occupantJID.userhostJID()
self.userJID = JID('[email protected]/Testing')
def test_initNoReactor(self):
"""
If no reactor is passed, the default reactor is used.
"""
protocol = muc.MUCClientProtocol()
from twisted.internet import reactor
self.assertEqual(reactor, protocol._reactor)
def test_groupChatReceived(self):
"""
Messages of type groupchat are parsed and passed to L{groupChatReceived}.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.occupantJID)
def groupChatReceived(message):
self.assertEquals('test', message.body, "Wrong group chat message")
self.assertEquals(self.roomIdentifier, message.sender.user,
'Wrong room identifier')
d, self.protocol.groupChatReceived = calledAsync(groupChatReceived)
self.stub.send(parseXml(xml))
return d
def test_groupChatReceivedNotOverridden(self):
"""
If L{groupChatReceived} has not been overridden, no errors should occur.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.occupantJID)
self.stub.send(parseXml(xml))
def test_join(self):
"""
        Joining a room waits for confirmation; the deferred fires with the user presence.
"""
def cb(presence):
self.assertEquals(self.occupantJID, presence.sender)
# Join the room
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(cb)
element = self.stub.output[-1]
self.assertEquals('presence', element.name, "Need to be presence")
self.assertNotIdentical(None, element.x, 'No muc x element')
# send back user presence, they joined
xml = """
<presence from='%s@%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomIdentifier, self.service, self.nick)
self.stub.send(parseXml(xml))
return d
def test_joinHistory(self):
"""
Passing a history parameter sends a 'maxStanzas' history limit.
"""
historyOptions = muc.HistoryOptions(maxStanzas=10)
d = self.protocol.join(self.roomJID, self.nick,
historyOptions)
element = self.stub.output[-1]
query = "/*/x[@xmlns='%s']/history[@xmlns='%s']" % (muc.NS_MUC,
muc.NS_MUC)
result = xpath.queryForNodes(query, element)
history = result[0]
self.assertEquals('10', history.getAttribute('maxstanzas'))
# send back user presence, they joined
xml = """
<presence from='%s@%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomIdentifier, self.service, self.nick)
self.stub.send(parseXml(xml))
return d
def test_joinForbidden(self):
"""
A forbidden error in response to a join errbacks with L{StanzaError}.
"""
def cb(error):
self.assertEquals('forbidden', error.condition,
'Wrong muc condition')
d = self.protocol.join(self.roomJID, self.nick)
self.assertFailure(d, StanzaError)
d.addCallback(cb)
# send back error, forbidden
xml = u"""
<presence from='%s' type='error'>
<error type='auth'>
<forbidden xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.occupantJID)
self.stub.send(parseXml(xml))
return d
def test_joinForbiddenFromRoomJID(self):
"""
An error response to a join sent from the room JID should errback.
Some service implementations send error stanzas from the room JID
instead of the JID the join presence was sent to.
"""
d = self.protocol.join(self.roomJID, self.nick)
self.assertFailure(d, StanzaError)
# send back error, forbidden
xml = u"""
<presence from='%s' type='error'>
<error type='auth'>
<forbidden xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.roomJID)
self.stub.send(parseXml(xml))
return d
def test_joinBadJID(self):
"""
Client joining a room and getting a jid-malformed error.
"""
def cb(error):
self.assertEquals('jid-malformed', error.condition,
'Wrong muc condition')
d = self.protocol.join(self.roomJID, self.nick)
self.assertFailure(d, StanzaError)
d.addCallback(cb)
# send back error, bad JID
xml = u"""
<presence from='%s' type='error'>
<error type='modify'>
<jid-malformed xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.occupantJID)
self.stub.send(parseXml(xml))
return d
def test_joinTimeout(self):
"""
After not receiving a response to a join, errback with L{TimeoutError}.
"""
d = self.protocol.join(self.roomJID, self.nick)
self.assertFailure(d, TimeoutError)
self.clock.advance(muc.DEFER_TIMEOUT)
return d
def test_joinPassword(self):
"""
Sending a password via presence to a password protected room.
"""
self.protocol.join(self.roomJID, self.nick, password='secret')
element = self.stub.output[-1]
self.assertTrue(xpath.matches(
u"/presence[@to='%s']/x/password"
"[text()='secret']" % (self.occupantJID,),
element),
'Wrong presence stanza')
def test_nick(self):
"""
Send a nick change to the server.
"""
newNick = 'newNick'
def cb(presence):
self.assertEquals(JID(tuple=(self.roomIdentifier,
self.service,
newNick)),
presence.sender)
d = self.protocol.nick(self.roomJID, newNick)
d.addCallback(cb)
element = self.stub.output[-1]
self.assertEquals('presence', element.name, "Need to be presence")
self.assertNotIdentical(None, element.x, 'No muc x element')
# send back user presence, nick changed
xml = u"""
<presence from='%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomJID, newNick)
self.stub.send(parseXml(xml))
return d
def test_nickConflict(self):
"""
If the server finds the new nick in conflict, the errback is called.
"""
newNick = 'newNick'
d = self.protocol.nick(self.roomJID, newNick)
self.assertFailure(d, StanzaError)
element = self.stub.output[-1]
self.assertEquals('presence', element.name, "Need to be presence")
self.assertNotIdentical(None, element.x, 'No muc x element')
# send back error presence, nick conflicted
xml = u"""
<presence from='%s/%s' type='error'>
<x xmlns='http://jabber.org/protocol/muc'/>
<error type='cancel'>
<conflict xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.roomJID, newNick)
self.stub.send(parseXml(xml))
return d
def test_status(self):
"""
Change status.
"""
def joined(_):
d = self.protocol.status(self.roomJID, 'xa', 'testing MUC')
d.addCallback(statusChanged)
return d
def statusChanged(presence):
self.assertEqual(self.occupantJID, presence.sender)
# Join the room
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(joined)
# Receive presence back from the room: joined.
xml = u"""
<presence to='%s' from='%s'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
# The presence for the status change should have been sent now.
element = self.stub.output[-1]
self.assertEquals('presence', element.name, "Need to be presence")
self.assertTrue(getattr(element, 'x', None), 'No muc x element')
# send back user presence, status changed
xml = u"""
<presence from='%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
<show>xa</show>
<status>testing MUC</status>
</presence>
""" % self.occupantJID
self.stub.send(parseXml(xml))
return d
def test_leave(self):
"""
Client leaves a room.
"""
def joined(_):
return self.protocol.leave(self.roomJID)
# Join the room
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(joined)
# Receive presence back from the room: joined.
xml = u"""
<presence to='%s' from='%s'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
# The presence for leaving the room should have been sent now.
element = self.stub.output[-1]
self.assertEquals('unavailable', element['type'],
'Unavailable is not being sent')
# Receive presence back from the room: left.
xml = u"""
<presence to='%s' from='%s' type='unavailable'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
return d
def test_groupChat(self):
"""
Send a group chat message to the room.
"""
self.protocol.groupChat(self.roomJID, u'This is a test')
message = self.stub.output[-1]
self.assertEquals('message', message.name)
self.assertEquals(self.roomJID.full(), message.getAttribute('to'))
self.assertEquals('groupchat', message.getAttribute('type'))
self.assertEquals(u'This is a test', unicode(message.body))
def test_chat(self):
"""
Send private messages to muc entities.
"""
otherOccupantJID = JID(self.occupantJID.userhost()+'/OtherNick')
self.protocol.chat(otherOccupantJID, u'This is a test')
message = self.stub.output[-1]
self.assertEquals('message', message.name)
self.assertEquals(otherOccupantJID.full(), message.getAttribute('to'))
self.assertEquals('chat', message.getAttribute('type'))
self.assertEquals(u'This is a test', unicode(message.body))
def test_subject(self):
"""
Change subject of the room.
"""
self.protocol.subject(self.roomJID, u'This is a test')
message = self.stub.output[-1]
self.assertEquals('message', message.name)
self.assertEquals(self.roomJID.full(), message.getAttribute('to'))
self.assertEquals('groupchat', message.getAttribute('type'))
self.assertEquals(u'This is a test', unicode(message.subject))
def test_invite(self):
"""
Invite a user to a room
"""
invitee = JID('[email protected]')
self.protocol.invite(self.roomJID, invitee, u'This is a test')
message = self.stub.output[-1]
self.assertEquals('message', message.name)
self.assertEquals(self.roomJID.full(), message.getAttribute('to'))
self.assertEquals(muc.NS_MUC_USER, message.x.uri)
self.assertEquals(muc.NS_MUC_USER, message.x.invite.uri)
self.assertEquals(invitee.full(), message.x.invite.getAttribute('to'))
self.assertEquals(muc.NS_MUC_USER, message.x.invite.reason.uri)
self.assertEquals(u'This is a test', unicode(message.x.invite.reason))
def test_getRegisterForm(self):
"""
The response of a register form request should extract the form.
"""
def cb(form):
self.assertEquals('form', form.formType)
d = self.protocol.getRegisterForm(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % (muc.NS_REGISTER)
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Missing query element')
self.assertRaises(StopIteration, next, nodes[0].elements())
xml = u"""
<iq from='%s' id='%s' to='%s' type='result'>
<query xmlns='jabber:iq:register'>
<x xmlns='jabber:x:data' type='form'>
<field type='hidden'
var='FORM_TYPE'>
<value>http://jabber.org/protocol/muc#register</value>
</field>
<field label='Desired Nickname'
type='text-single'
var='muc#register_roomnick'>
<required/>
</field>
</x>
</query>
</iq>
""" % (self.roomJID, iq['id'], self.userJID)
self.stub.send(parseXml(xml))
return d
def test_register(self):
"""
Client registering with a room.
http://xmpp.org/extensions/xep-0045.html#register
"""
def cb(iq):
# check for a result
self.assertEquals('result', iq['type'], 'We did not get a result')
d = self.protocol.register(self.roomJID,
{'muc#register_roomnick': 'thirdwitch'})
d.addCallback(cb)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % muc.NS_REGISTER
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Invalid registration request')
form = data_form.findForm(nodes[0], muc.NS_MUC_REGISTER)
self.assertNotIdentical(None, form, 'Missing registration form')
self.assertEquals('submit', form.formType)
self.assertIn('muc#register_roomnick', form.fields)
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_registerCancel(self):
"""
Cancelling a registration request sends a cancel form.
"""
d = self.protocol.register(self.roomJID, None)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % muc.NS_REGISTER
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Invalid registration request')
form = data_form.findForm(nodes[0], muc.NS_MUC_REGISTER)
self.assertNotIdentical(None, form, 'Missing registration form')
self.assertEquals('cancel', form.formType)
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_voice(self):
"""
Client requesting voice for a room.
"""
self.protocol.voice(self.occupantJID)
m = self.stub.output[-1]
query = ("/message/x[@type='submit']/field/value"
"[text()='%s']") % muc.NS_MUC_REQUEST
self.assertTrue(xpath.matches(query, m), 'Invalid voice message stanza')
def test_history(self):
"""
Sending the history of a one-to-one chat to a room when converting it to a
multi-user chat.
"""
archive = []
thread = "e0ffe42b28561960c6b12b944a092794b9683a38"
# create messages
element = domish.Element((None, 'message'))
element['to'] = '[email protected]'
element['type'] = 'chat'
element.addElement('body', None, 'test')
element.addElement('thread', None, thread)
archive.append({'stanza': element,
'timestamp': datetime(2002, 10, 13, 23, 58, 37,
tzinfo=tzutc())})
element = domish.Element((None, 'message'))
element['to'] = '[email protected]'
element['type'] = 'chat'
element.addElement('body', None, 'yo')
element.addElement('thread', None, thread)
archive.append({'stanza': element,
'timestamp': datetime(2002, 10, 13, 23, 58, 43,
tzinfo=tzutc())})
self.protocol.history(self.occupantJID, archive)
while len(self.stub.output)>0:
element = self.stub.output.pop()
# check for delay element
self.assertEquals('message', element.name, 'Wrong stanza')
self.assertTrue(xpath.matches("/message/delay", element),
'Invalid history stanza')
def test_getConfiguration(self):
"""
The response of a configure form request should extract the form.
"""
def cb(form):
self.assertEquals('form', form.formType)
d = self.protocol.getConfiguration(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % (muc.NS_MUC_OWNER)
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Missing query element')
self.assertRaises(StopIteration, next, nodes[0].elements())
xml = u"""
<iq from='%s' id='%s' to='%s' type='result'>
<query xmlns='http://jabber.org/protocol/muc#owner'>
<x xmlns='jabber:x:data' type='form'>
<field type='hidden'
var='FORM_TYPE'>
<value>http://jabber.org/protocol/muc#roomconfig</value>
</field>
<field label='Natural-Language Room Name'
type='text-single'
var='muc#roomconfig_roomname'/>
</x>
</query>
</iq>
""" % (self.roomJID, iq['id'], self.userJID)
self.stub.send(parseXml(xml))
return d
def test_getConfigurationNoOptions(self):
"""
The response of a configure form request should extract the form.
"""
def cb(form):
self.assertIdentical(None, form)
d = self.protocol.getConfiguration(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
xml = u"""
<iq from='%s' id='%s' to='%s' type='result'>
<query xmlns='http://jabber.org/protocol/muc#owner'/>
</iq>
""" % (self.roomJID, iq['id'], self.userJID)
self.stub.send(parseXml(xml))
return d
def test_configure(self):
"""
Default configure and changing the room name.
"""
def cb(iq):
self.assertEquals('result', iq['type'], 'Not a result')
values = {'muc#roomconfig_roomname': self.roomIdentifier}
d = self.protocol.configure(self.roomJID, values)
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertEquals('set', iq.getAttribute('type'))
self.assertEquals(self.roomJID.full(), iq.getAttribute('to'))
query = "/iq/query[@xmlns='%s']" % (muc.NS_MUC_OWNER)
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Bad configure request')
form = data_form.findForm(nodes[0], muc.NS_MUC_CONFIG)
self.assertNotIdentical(None, form, 'Missing configuration form')
self.assertEquals('submit', form.formType)
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_configureEmpty(self):
"""
Accept default configuration by sending an empty form.
"""
values = {}
d = self.protocol.configure(self.roomJID, values)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % (muc.NS_MUC_OWNER)
nodes = xpath.queryForNodes(query, iq)
form = data_form.findForm(nodes[0], muc.NS_MUC_CONFIG)
self.assertNotIdentical(None, form, 'Missing configuration form')
self.assertEquals('submit', form.formType)
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_configureCancel(self):
"""
Cancelling room configuration should send a cancel form.
"""
d = self.protocol.configure(self.roomJID, None)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']" % (muc.NS_MUC_OWNER)
nodes = xpath.queryForNodes(query, iq)
form = data_form.findForm(nodes[0], muc.NS_MUC_CONFIG)
self.assertNotIdentical(None, form, 'Missing configuration form')
self.assertEquals('cancel', form.formType)
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_getMemberList(self):
"""
Retrieving the member list returns a list of L{muc.AdminItem}s.
The request asks for the affiliation C{'member'}.
"""
def cb(items):
self.assertEquals(1, len(items))
item = items[0]
self.assertEquals(JID(u'[email protected]'), item.entity)
self.assertEquals(u'thirdwitch', item.nick)
self.assertEquals(u'member', item.affiliation)
d = self.protocol.getMemberList(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertEquals('get', iq.getAttribute('type'))
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, items)
self.assertEquals(1, len(items))
self.assertEquals('member', items[0].getAttribute('affiliation'))
response = toResponse(iq, 'result')
query = response.addElement((NS_MUC_ADMIN, 'query'))
item = query.addElement('item')
item['affiliation'] ='member'
item['jid'] = '[email protected]'
item['nick'] = 'thirdwitch'
item['role'] = 'participant'
self.stub.send(response)
return d
def test_getAdminList(self):
"""
Retrieving the admin list returns a list of L{muc.AdminItem}s.
The request asks for the affiliation C{'admin'}.
"""
d = self.protocol.getAdminList(self.roomJID)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertEquals('admin', items[0].getAttribute('affiliation'))
response = toResponse(iq, 'result')
query = response.addElement((NS_MUC_ADMIN, 'query'))
self.stub.send(response)
return d
def test_getBanList(self):
"""
Retrieving the ban list returns a list of L{muc.AdminItem}s.
The request asks for the affiliation C{'outcast'}.
"""
def cb(items):
self.assertEquals(1, len(items))
item = items[0]
self.assertEquals(JID(u'[email protected]'), item.entity)
self.assertEquals(u'outcast', item.affiliation)
self.assertEquals(u'Trouble making', item.reason)
d = self.protocol.getBanList(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertEquals('outcast', items[0].getAttribute('affiliation'))
response = toResponse(iq, 'result')
query = response.addElement((NS_MUC_ADMIN, 'query'))
item = query.addElement('item')
item['affiliation'] ='outcast'
item['jid'] = '[email protected]'
item.addElement('reason', content='Trouble making')
self.stub.send(response)
return d
def test_getOwnerList(self):
"""
Retrieving the owner list returns a list of L{muc.AdminItem}s.
The request asks for the affiliation C{'owner'}.
"""
d = self.protocol.getOwnerList(self.roomJID)
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertEquals('owner', items[0].getAttribute('affiliation'))
response = toResponse(iq, 'result')
query = response.addElement((NS_MUC_ADMIN, 'query'))
self.stub.send(response)
return d
def test_getModeratorList(self):
"""
Retrieving the moderator list returns a list of L{muc.AdminItem}s.
The request asks for the role C{'moderator'}.
"""
def cb(items):
self.assertEquals(1, len(items))
item = items[0]
self.assertEquals(JID(u'[email protected]'), item.entity)
self.assertEquals(u'thirdwitch', item.nick)
self.assertEquals(u'moderator', item.role)
d = self.protocol.getModeratorList(self.roomJID)
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertEquals('get', iq.getAttribute('type'))
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, items)
self.assertEquals(1, len(items))
self.assertEquals('moderator', items[0].getAttribute('role'))
response = toResponse(iq, 'result')
query = response.addElement((NS_MUC_ADMIN, 'query'))
item = query.addElement('item')
item['affiliation'] ='member'
item['jid'] = '[email protected]'
item['nick'] = 'thirdwitch'
item['role'] = 'moderator'
self.stub.send(response)
return d
def test_modifyAffiliationList(self):
entities = [JID('[email protected]'),
JID('[email protected]')]
d = self.protocol.modifyAffiliationList(self.roomJID, entities,
'admin')
iq = self.stub.output[-1]
query = "/iq/query[@xmlns='%s']/item[@xmlns='%s']" % (muc.NS_MUC_ADMIN,
muc.NS_MUC_ADMIN)
items = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, items)
self.assertEquals(entities[0], JID(items[0].getAttribute('jid')))
self.assertEquals('admin', items[0].getAttribute('affiliation'))
self.assertEquals(entities[1], JID(items[1].getAttribute('jid')))
self.assertEquals('admin', items[1].getAttribute('affiliation'))
# Send a response to have the deferred fire.
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_grantVoice(self):
"""
Granting voice sends request to set role to 'participant'.
"""
nick = 'TroubleMaker'
def cb(give_voice):
self.assertTrue(give_voice, 'Did not give voice to user')
d = self.protocol.grantVoice(self.roomJID, nick,
sender=self.userJID)
d.addCallback(cb)
iq = self.stub.output[-1]
query = (u"/iq[@type='set' and @to='%s']/query/item"
"[@role='participant']") % self.roomJID
self.assertTrue(xpath.matches(query, iq), 'Wrong voice stanza')
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_revokeVoice(self):
"""
Revoking voice sends request to set role to 'visitor'.
"""
nick = 'TroubleMaker'
d = self.protocol.revokeVoice(self.roomJID, nick,
reason="Trouble maker",
sender=self.userJID)
iq = self.stub.output[-1]
query = (u"/iq[@type='set' and @to='%s']/query/item"
"[@role='visitor']") % self.roomJID
self.assertTrue(xpath.matches(query, iq), 'Wrong voice stanza')
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_grantModerator(self):
"""
Granting moderator privileges sends request to set role to 'moderator'.
"""
nick = 'TroubleMaker'
d = self.protocol.grantModerator(self.roomJID, nick,
sender=self.userJID)
iq = self.stub.output[-1]
query = (u"/iq[@type='set' and @to='%s']/query/item"
"[@role='moderator']") % self.roomJID
self.assertTrue(xpath.matches(query, iq), 'Wrong moderator stanza')
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_ban(self):
"""
Ban an entity in a room.
"""
banned = JID('[email protected]/TroubleMaker')
def cb(banned):
self.assertTrue(banned, 'Did not ban user')
d = self.protocol.ban(self.roomJID, banned, reason='Spam',
sender=self.userJID)
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertTrue(xpath.matches(
u"/iq[@type='set' and @to='%s']/query/item"
"[@affiliation='outcast']" % (self.roomJID,),
iq),
'Wrong ban stanza')
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_kick(self):
"""
Kick an entity from a room.
"""
nick = 'TroubleMaker'
def cb(kicked):
self.assertTrue(kicked, 'Did not kick user')
d = self.protocol.kick(self.roomJID, nick, reason='Spam',
sender=self.userJID)
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertTrue(xpath.matches(
u"/iq[@type='set' and @to='%s']/query/item"
"[@role='none']" % (self.roomJID,),
iq),
'Wrong kick stanza')
response = toResponse(iq, 'result')
self.stub.send(response)
return d
def test_destroy(self):
"""
Destroy a room.
"""
d = self.protocol.destroy(self.occupantJID, reason='Time to leave',
alternate=JID('other@%s' % self.service),
password='secret')
iq = self.stub.output[-1]
query = ("/iq[@type='set']/query[@xmlns='%s']/destroy[@xmlns='%s']" %
(muc.NS_MUC_OWNER, muc.NS_MUC_OWNER))
nodes = xpath.queryForNodes(query, iq)
self.assertNotIdentical(None, nodes, 'Bad configure request')
destroy = nodes[0]
self.assertEquals('Time to leave', unicode(destroy.reason))
response = toResponse(iq, 'result')
self.stub.send(response)
return d
class MUCClientTest(unittest.TestCase):
"""
Tests for C{muc.MUCClient}.
"""
def setUp(self):
self.clock = task.Clock()
self.sessionManager = TestableStreamManager(reactor=self.clock)
self.stub = self.sessionManager.stub
self.protocol = muc.MUCClient(reactor=self.clock)
self.protocol.setHandlerParent(self.sessionManager)
self.roomIdentifier = 'test'
self.service = 'conference.example.org'
self.nick = 'Nick'
self.occupantJID = JID(tuple=(self.roomIdentifier,
self.service,
self.nick))
self.roomJID = self.occupantJID.userhostJID()
self.userJID = JID('[email protected]/Testing')
def _createRoom(self):
"""
A helper method to create a test room.
"""
# create a room
room = muc.Room(self.roomJID, self.nick)
self.protocol._addRoom(room)
return room
def test_interface(self):
"""
Do instances of L{muc.MUCClient} provide L{iwokkel.IMUCClient}?
"""
verify.verifyObject(iwokkel.IMUCClient, self.protocol)
def _testPresence(self, sender='', available=True):
"""
Helper for presence tests.
"""
def userUpdatedStatus(room, user, show, status):
self.fail("Unexpected call to userUpdatedStatus")
def userJoinedRoom(room, user):
self.fail("Unexpected call to userJoinedRoom")
if available:
available = ""
else:
available = " type='unavailable'"
if sender:
sender = u" from='%s'" % sender
xml = u"""
<presence to='%s'%s%s>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.userJID, sender, available)
self.protocol.userUpdatedStatus = userUpdatedStatus
self.protocol.userJoinedRoom = userJoinedRoom
self.stub.send(parseXml(xml))
def test_availableReceivedEmptySender(self):
"""
Availability presence from empty sender is ignored.
"""
self._testPresence(sender='')
def test_availableReceivedNotInRoom(self):
"""
Availability presence from unknown entities is ignored.
"""
otherOccupantJID = JID(self.occupantJID.userhost()+'/OtherNick')
self._testPresence(sender=otherOccupantJID)
def test_availableReceivedSetsUserRole(self):
"""
The role received in a presence update is stored on the user.
"""
room = self._createRoom()
user = muc.User(self.nick)
room.addUser(user)
self.assertEquals('none', user.role)
xml = u"""
<presence to='%s' from='%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
self.assertEquals('participant', user.role)
def test_availableReceivedSetsUserAffiliation(self):
"""
The affiliation received in a presence update is stored on the user.
"""
room = self._createRoom()
user = muc.User(self.nick)
room.addUser(user)
self.assertEquals('none', user.affiliation)
xml = u"""
<presence to='%s' from='%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
self.assertEquals('member', user.affiliation)
def test_unavailableReceivedEmptySender(self):
"""
Unavailable presence from an empty sender is ignored.
"""
self._testPresence(sender='', available=False)
def test_unavailableReceivedNotInRoom(self):
"""
Unavailable presence from unknown entities is ignored.
"""
otherOccupantJID = JID(self.occupantJID.userhost()+'/OtherNick')
self._testPresence(sender=otherOccupantJID, available=False)
def test_unavailableReceivedNotInRoster(self):
"""
Unavailable presence from an occupant not in the room roster is ignored.
"""
room = self._createRoom()
user = muc.User(self.nick)
room.addUser(user)
otherOccupantJID = JID(self.occupantJID.userhost()+'/OtherNick')
self._testPresence(sender=otherOccupantJID, available=False)
def test_userJoinedRoom(self):
"""
Joins by others to a room we're in are passed to userJoinedRoom.
"""
xml = """
<presence to='%s' from='%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.userJID.full(), self.occupantJID.full())
# create a room
self._createRoom()
def userJoinedRoom(room, user):
self.assertEquals(self.roomJID, room.roomJID,
'Wrong room name')
self.assertTrue(room.inRoster(user), 'User not in roster')
d, self.protocol.userJoinedRoom = calledAsync(userJoinedRoom)
self.stub.send(parseXml(xml))
return d
def test_receivedSubject(self):
"""
Subjects received from a room we're in are passed to receivedSubject.
"""
xml = u"""
<message to='%s' from='%s' type='groupchat'>
<subject>test</subject>
</message>
""" % (self.userJID, self.occupantJID)
self._createRoom()
# add user to room
user = muc.User(self.nick)
room = self.protocol._getRoom(self.roomJID)
room.addUser(user)
def receivedSubject(room, user, subject):
self.assertEquals('test', subject, "Wrong subject")
self.assertEquals(self.roomJID, room.roomJID,
'Wrong room name')
self.assertEquals(self.nick, user.nick)
d, self.protocol.receivedSubject = calledAsync(receivedSubject)
self.stub.send(parseXml(xml))
return d
def test_receivedSubjectNotOverridden(self):
"""
Not overriding receivedSubject is ok.
"""
xml = u"""
<message to='%s' from='%s' type='groupchat'>
<subject>test</subject>
</message>
""" % (self.userJID, self.occupantJID)
self._createRoom()
self.stub.send(parseXml(xml))
def test_receivedGroupChat(self):
"""
Messages received from a room we're in are passed to receivedGroupChat.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.occupantJID)
self._createRoom()
def receivedGroupChat(room, user, message):
self.assertEquals('test', message.body, "Wrong group chat message")
self.assertEquals(self.roomJID, room.roomJID,
'Wrong room name')
d, self.protocol.receivedGroupChat = calledAsync(receivedGroupChat)
self.stub.send(parseXml(xml))
return d
def test_receivedGroupChatRoom(self):
"""
Messages received from the room itself have C{user} set to C{None}.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.roomJID)
self._createRoom()
def receivedGroupChat(room, user, message):
self.assertIdentical(None, user)
d, self.protocol.receivedGroupChat = calledAsync(receivedGroupChat)
self.stub.send(parseXml(xml))
return d
def test_receivedGroupChatNotInRoom(self):
"""
Messages received from a room we're not in are ignored.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.occupantJID)
def receivedGroupChat(room, user, message):
self.fail("Unexpected call to receivedGroupChat")
self.protocol.receivedGroupChat = receivedGroupChat
self.stub.send(parseXml(xml))
def test_receivedGroupChatNotOverridden(self):
"""
Not overriding receivedGroupChat is ok.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
</message>
""" % (self.occupantJID)
self._createRoom()
self.stub.send(parseXml(xml))
def test_join(self):
"""
Joining a room waits for confirmation; the deferred fires with the room.
"""
def cb(room):
self.assertEqual(self.roomJID, room.roomJID)
self.assertFalse(room.locked)
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(cb)
# send back user presence, they joined
xml = """
<presence from='%s@%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomIdentifier, self.service, self.nick)
self.stub.send(parseXml(xml))
return d
def test_joinLocked(self):
"""
A new room is locked by default.
"""
def cb(room):
self.assertTrue(room.locked, "Room is not marked as locked")
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(cb)
# send back user presence, they joined
xml = """
<presence from='%s@%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='owner' role='moderator'/>
<status code="110"/>
<status code="201"/>
</x>
</presence>
""" % (self.roomIdentifier, self.service, self.nick)
self.stub.send(parseXml(xml))
return d
def test_joinForbidden(self):
"""
A forbidden error in response to a join errbacks with L{StanzaError}.
"""
def cb(error):
self.assertEquals('forbidden', error.condition,
'Wrong muc condition')
self.assertIdentical(None, self.protocol._getRoom(self.roomJID))
d = self.protocol.join(self.roomJID, self.nick)
self.assertFailure(d, StanzaError)
d.addCallback(cb)
# send back error, forbidden
xml = u"""
<presence from='%s' type='error'>
<error type='auth'>
<forbidden xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.occupantJID)
self.stub.send(parseXml(xml))
return d
def test_userLeftRoom(self):
"""
Unavailable presence from a participant removes it from the room.
"""
xml = u"""
<presence to='%s' from='%s' type='unavailable'/>
""" % (self.userJID, self.occupantJID)
# create a room
self._createRoom()
# add user to room
user = muc.User(self.nick)
room = self.protocol._getRoom(self.roomJID)
room.addUser(user)
def userLeftRoom(room, user):
self.assertEquals(self.roomJID, room.roomJID,
'Wrong room name')
self.assertFalse(room.inRoster(user), 'User in roster')
d, self.protocol.userLeftRoom = calledAsync(userLeftRoom)
self.stub.send(parseXml(xml))
return d
def test_receivedHistory(self):
"""
Receiving history on room join.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
<delay xmlns='urn:xmpp:delay' stamp="2002-10-13T23:58:37Z"
from="%s"/>
</message>
""" % (self.occupantJID, self.userJID)
self._createRoom()
def receivedHistory(room, user, message):
self.assertEquals('test', message.body, "wrong message body")
stamp = datetime(2002, 10, 13, 23, 58, 37, tzinfo=tzutc())
self.assertEquals(stamp, message.delay.stamp,
'Does not have a history stamp')
d, self.protocol.receivedHistory = calledAsync(receivedHistory)
self.stub.send(parseXml(xml))
return d
def test_receivedHistoryNotOverridden(self):
"""
Not overriding receivedHistory is ok.
"""
xml = u"""
<message to='[email protected]' from='%s' type='groupchat'>
<body>test</body>
<delay xmlns='urn:xmpp:delay' stamp="2002-10-13T23:58:37Z"
from="%s"/>
</message>
""" % (self.occupantJID, self.userJID)
self._createRoom()
self.stub.send(parseXml(xml))
def test_nickConflict(self):
"""
If the server finds the new nick in conflict, the errback is called.
"""
def cb(failure, room):
user = room.getUser(otherNick)
self.assertNotIdentical(None, user)
self.assertEqual(otherJID, user.entity)
def joined(room):
d = self.protocol.nick(room.roomJID, otherNick)
self.assertFailure(d, StanzaError)
d.addCallback(cb, room)
otherJID = JID('[email protected]/Home')
otherNick = 'otherNick'
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(joined)
# Send back the other participant's presence.
xml = u"""
<presence from='%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant' jid='%s'/>
</x>
</presence>
""" % (self.roomJID, otherNick, otherJID)
self.stub.send(parseXml(xml))
# send back user presence, they joined
xml = u"""
<presence from='%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomJID, self.nick)
self.stub.send(parseXml(xml))
# send back error presence, nick conflicted
xml = u"""
<presence from='%s/%s' type='error'>
<x xmlns='http://jabber.org/protocol/muc'/>
<error type='cancel'>
<conflict xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/>
</error>
</presence>
""" % (self.roomJID, otherNick)
self.stub.send(parseXml(xml))
return d
def test_nick(self):
"""
Send a nick change to the server.
"""
newNick = 'newNick'
room = self._createRoom()
def joined(room):
self.assertEqual(self.roomJID, room.roomJID)
self.assertEqual(newNick, room.nick)
user = room.getUser(newNick)
self.assertNotIdentical(None, user)
self.assertEqual(newNick, user.nick)
d = self.protocol.nick(self.roomJID, newNick)
d.addCallback(joined)
# Nick should not have been changed, yet, as we haven't gotten
# confirmation, yet.
self.assertEquals(self.nick, room.nick)
# send back user presence, nick changed
xml = u"""
<presence from='%s/%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
</presence>
""" % (self.roomJID, newNick)
self.stub.send(parseXml(xml))
return d
def test_leave(self):
"""
Client leaves a room.
"""
def joined(_):
return self.protocol.leave(self.roomJID)
def left(_):
self.assertIdentical(None, self.protocol._getRoom(self.roomJID))
# Join the room
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(joined)
d.addCallback(left)
# Receive presence back from the room: joined.
xml = u"""
<presence to='%s' from='%s'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
# Receive presence back from the room: left.
xml = u"""
<presence to='%s' from='%s' type='unavailable'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
return d
def test_status(self):
"""
Change status.
"""
def joined(_):
d = self.protocol.status(self.roomJID, 'xa', 'testing MUC')
d.addCallback(statusChanged)
return d
def statusChanged(room):
self.assertEqual(self.roomJID, room.roomJID)
user = room.getUser(self.nick)
self.assertNotIdentical(None, user, 'User not found')
self.assertEqual('testing MUC', user.status, 'Wrong status')
self.assertEqual('xa', user.show, 'Wrong show')
# Join the room
d = self.protocol.join(self.roomJID, self.nick)
d.addCallback(joined)
# Receive presence back from the room: joined.
xml = u"""
<presence to='%s' from='%s'/>
""" % (self.userJID, self.occupantJID)
self.stub.send(parseXml(xml))
# send back user presence, status changed
xml = u"""
<presence from='%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<item affiliation='member' role='participant'/>
</x>
<show>xa</show>
<status>testing MUC</status>
</presence>
""" % self.occupantJID
self.stub.send(parseXml(xml))
return d
def test_destroy(self):
"""
Destroy a room.
"""
def destroyed(_):
self.assertIdentical(None, self.protocol._getRoom(self.roomJID))
d = self.protocol.destroy(self.occupantJID, reason='Time to leave',
alternate=JID('other@%s' % self.service),
password='secret')
d.addCallback(destroyed)
iq = self.stub.output[-1]
response = toResponse(iq, 'result')
self.stub.send(response)
return d
|
|
"""
Display Github notifications and issue/pull requests for a repo.
To check notifications a Github `username` and `personal access token` are
required. You can create a personal access token at
https://github.com/settings/tokens/new?scopes=notifications&description=py3status
The only `scope` needed is `notifications`, which is selected automatically
for you and provides readonly access to notifications.
The Github API is rate limited, so setting `cache_timeout` too small may cause
issues; see https://developer.github.com/v3/#rate-limiting for details.
Configuration parameters:
auth_token: Github personal access token, needed to check notifications
see above.
(default None)
button_action: Button that when clicked opens the Github notification page
if there are notifications, else the project page for the repository if
there is one (otherwise the github home page). Setting to `None` disables.
(default 3)
button_refresh: Button that when clicked refreshes module.
Setting to `None` disables.
(default 2)
cache_timeout: How often we refresh this module in seconds
(default 60)
format: display format for this module, see Examples below (default None)
format_notifications: Format of `{notification}` status placeholder.
(default ' N{notifications_count}')
notifications: Type of notifications can be `all` for all notifications or
`repo` to only get notifications for the repo specified. If repo is
not provided then all notifications will be checked.
(default 'all')
repo: Github repo to check
(default 'ultrabug/py3status')
url_api: Change only if using Enterprise Github, example https://github.domain.com/api/v3.
(default 'https://api.github.com')
url_base: Change only if using Enterprise Github, example https://github.domain.com.
(default 'https://github.com')
username: Github username, needed to check notifications.
(default None)
Format placeholders:
{issues} Number of open issues.
{notifications} Notifications. If no notifications this will be empty.
{notifications_count} Number of notifications. This is also the __Only__
placeholder available to `format_notifications`.
{pull_requests} Number of open pull requests
{repo} short name of the repository being checked. eg py3status
{repo_full} full name of the repository being checked. eg ultrabug/py3status
Examples:
```
# default formats
github {
# with username and auth_token, this will be used
format = '{repo} {issues}/{pull_requests}{notifications}'
# otherwise, this will be used
format = '{repo} {issues}/{pull_requests}'
}
# set github access credentials
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
}
# just check for any notifications
github {
auth_token = '40_char_hex_access_token'
username = 'my_username'
format = 'Github {notifications_count}'
}
```
@author tobes
SAMPLE OUTPUT
{'full_text': 'py3status 34/24'}
notification
{'full_text': 'py3status 34/24 N3', 'urgent': True}
"""
import urllib.parse as urlparse
class Py3status:
"""
"""
# available configuration parameters
auth_token = None
button_action = 3
button_refresh = 2
cache_timeout = 60
format = None
format_notifications = " N{notifications_count}"
notifications = "all"
repo = "ultrabug/py3status"
url_api = "https://api.github.com"
url_base = "https://github.com"
username = None
def post_config_hook(self):
self.notification_warning = False
self.repo_warning = False
self._issues = "?"
self._pulls = "?"
self._notify = "?"
# remove a trailing slash in the urls
self.url_api = self.url_api.strip("/")
self.url_base = self.url_base.strip("/")
# Set format if user has not configured it.
if not self.format:
if self.username and self.auth_token:
# include notifications
self.format = "{repo} {issues}/{pull_requests}{notifications}"
else:
self.format = "{repo} {issues}/{pull_requests}"
def _github_count(self, url):
"""
Get counts for requests that return 'total_count' in the json response.
"""
url = self.url_api + url + "&per_page=1"
# if we have authentication details use them as we get better
# rate-limiting.
if self.username and self.auth_token:
auth = (self.username, self.auth_token)
else:
auth = None
try:
info = self.py3.request(url, auth=auth)
except self.py3.RequestException:
return
if info and info.status_code == 200:
return int(info.json()["total_count"])
if info.status_code == 422:
if not self.repo_warning:
self.py3.notify_user("Github repo cannot be found.")
self.repo_warning = True
return "?"
def _notifications(self):
"""
Get the number of unread notifications.
"""
if not self.username or not self.auth_token:
if not self.notification_warning:
self.py3.notify_user(
"Github module needs username and "
"auth_token to check notifications."
)
self.notification_warning = True
return "?"
if self.notifications == "all" or not self.repo:
url = self.url_api + "/notifications"
else:
url = self.url_api + "/repos/" + self.repo + "/notifications"
url += "?per_page=100"
try:
info = self.py3.request(url, auth=(self.username, self.auth_token))
except self.py3.RequestException:
return
if info.status_code == 200:
links = info.headers.get("Link")
if not links:
return len(info.json())
last_page = 1
for link in links.split(","):
if 'rel="last"' in link:
last_url = link[link.find("<") + 1 : link.find(">")]
parsed = urlparse.urlparse(last_url)
last_page = int(urlparse.parse_qs(parsed.query)["page"][0])
if last_page == 1:
return len(info.json())
try:
last_page_info = self.py3.request(
last_url, auth=(self.username, self.auth_token)
)
except self.py3.RequestException:
return
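# The notifications endpoint has no total count, so derive it from paging:
# every page before the last is full (the first page's length gives the page
# size), then add the items on the last page.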
return len(info.json()) * (last_page - 1) + len(last_page_info.json())
if info.status_code == 404:
if not self.repo_warning:
self.py3.notify_user("Github repo cannot be found.")
self.repo_warning = True
def github(self):
status = {}
urgent = False
# issues
if self.repo and self.py3.format_contains(self.format, "issues"):
url = "/search/issues?q=state:open+type:issue+repo:" + self.repo
self._issues = self._github_count(url) or self._issues
status["issues"] = self._issues
# pull requests
if self.repo and self.py3.format_contains(self.format, "pull_requests"):
url = "/search/issues?q=state:open+type:pr+repo:" + self.repo
self._pulls = self._github_count(url) or self._pulls
status["pull_requests"] = self._pulls
# notifications
if self.py3.format_contains(self.format, "notifications*"):
count = self._notifications()
# if we don't have a notification count, then use the last value
# that we did have.
if count is None:
count = self._notify
self._notify = count
if count and count != "?":
notify = self.py3.safe_format(
self.format_notifications, {"notifications_count": count}
)
urgent = True
else:
notify = ""
status["notifications"] = notify
status["notifications_count"] = count
# repo
try:
status["repo"] = self.repo.split("/")[1]
except IndexError:
status["repo"] = "Error"
status["repo_full"] = self.repo
cached_until = self.py3.time_in(self.cache_timeout)
return {
"full_text": self.py3.safe_format(self.format, status),
"cached_until": cached_until,
"urgent": urgent,
}
def on_click(self, event):
button = event["button"]
if button == self.button_action:
# open github in browser
if self._notify and self._notify != "?":
# open github notifications page
url = self.url_base + "/notifications"
else:
if self.notifications == "all" and not self.repo:
# open github.com if there are no unread notifications and no repo
url = self.url_base
else:
# open repo page if there are no unread notifications
url = self.url_base + "/" + self.repo
# open url in default browser
self.py3.command_run(f"xdg-open {url}")
self.py3.prevent_refresh()
elif button != self.button_refresh:
# only refresh the module if needed
self.py3.prevent_refresh()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
|
"""Support for Dyson Pure Cool Link Sensors."""
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_cool_link import DysonPureCoolLink
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
STATE_OFF,
TEMP_CELSIUS,
TIME_HOURS,
)
from . import DYSON_DEVICES, DysonEntity
SENSOR_ATTRIBUTES = {
"air_quality": {ATTR_ICON: "mdi:fan"},
"dust": {ATTR_ICON: "mdi:cloud"},
"humidity": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"temperature": {ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE},
"filter_life": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: TIME_HOURS,
},
"carbon_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"combi_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
"hepa_filter_state": {
ATTR_ICON: "mdi:filter-outline",
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
}
SENSOR_NAMES = {
"air_quality": "AQI",
"dust": "Dust",
"humidity": "Humidity",
"temperature": "Temperature",
"filter_life": "Filter Life",
"carbon_filter_state": "Carbon Filter Remaining Life",
"combi_filter_state": "Combi Filter Remaining Life",
"hepa_filter_state": "HEPA Filter Remaining Life",
}
DYSON_SENSOR_DEVICES = "dyson_sensor_devices"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson Sensors."""
if discovery_info is None:
return
hass.data.setdefault(DYSON_SENSOR_DEVICES, [])
unit = hass.config.units.temperature_unit
devices = hass.data[DYSON_SENSOR_DEVICES]
# Get Dyson Devices from parent component
device_ids = [device.unique_id for device in hass.data[DYSON_SENSOR_DEVICES]]
new_entities = []
for device in hass.data[DYSON_DEVICES]:
if isinstance(device, DysonPureCool):
if f"{device.serial}-temperature" not in device_ids:
new_entities.append(DysonTemperatureSensor(device, unit))
if f"{device.serial}-humidity" not in device_ids:
new_entities.append(DysonHumiditySensor(device))
# For PureCool+Humidify devices, a single filter exists, called "Combi Filter".
# It's reported with the HEPA state, while the Carbon state is set to INValid.
if device.state and device.state.carbon_filter_state == "INV":
if f"{device.serial}-hepa_filter_state" not in device_ids:
new_entities.append(DysonHepaFilterLifeSensor(device, "combi"))
else:
if f"{device.serial}-hepa_filter_state" not in device_ids:
new_entities.append(DysonHepaFilterLifeSensor(device))
if f"{device.serial}-carbon_filter_state" not in device_ids:
new_entities.append(DysonCarbonFilterLifeSensor(device))
elif isinstance(device, DysonPureCoolLink):
new_entities.append(DysonFilterLifeSensor(device))
new_entities.append(DysonDustSensor(device))
new_entities.append(DysonHumiditySensor(device))
new_entities.append(DysonTemperatureSensor(device, unit))
new_entities.append(DysonAirQualitySensor(device))
if not new_entities:
return
devices.extend(new_entities)
add_entities(devices)
class DysonSensor(DysonEntity, SensorEntity):
"""Representation of a generic Dyson sensor."""
def __init__(self, device, sensor_type):
"""Create a new generic Dyson sensor."""
super().__init__(device, None)
self._old_value = None
self._sensor_type = sensor_type
self._attributes = SENSOR_ATTRIBUTES[sensor_type]
def on_message(self, message):
"""Handle new messages which are received from the fan."""
# Prevent refreshing if not needed
if self._old_value is None or self._old_value != self.state:
self._old_value = self.state
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the Dyson sensor name."""
return f"{super().name} {SENSOR_NAMES[self._sensor_type]}"
@property
def unique_id(self):
"""Return the sensor's unique id."""
return f"{self._device.serial}-{self._sensor_type}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._attributes.get(ATTR_UNIT_OF_MEASUREMENT)
@property
def icon(self):
"""Return the icon for this sensor."""
return self._attributes.get(ATTR_ICON)
@property
def device_class(self):
"""Return the device class of this sensor."""
return self._attributes.get(ATTR_DEVICE_CLASS)
class DysonFilterLifeSensor(DysonSensor):
"""Representation of Dyson Filter Life sensor (in hours)."""
def __init__(self, device):
"""Create a new Dyson Filter Life sensor."""
super().__init__(device, "filter_life")
@property
def state(self):
"""Return filter life in hours."""
return int(self._device.state.filter_life)
class DysonCarbonFilterLifeSensor(DysonSensor):
"""Representation of Dyson Carbon Filter Life sensor (in percent)."""
def __init__(self, device):
"""Create a new Dyson Carbon Filter Life sensor."""
super().__init__(device, "carbon_filter_state")
@property
def state(self):
"""Return filter life remaining in percent."""
return int(self._device.state.carbon_filter_state)
class DysonHepaFilterLifeSensor(DysonSensor):
"""Representation of Dyson HEPA (or Combi) Filter Life sensor (in percent)."""
def __init__(self, device, filter_type="hepa"):
"""Create a new Dyson Filter Life sensor."""
super().__init__(device, f"{filter_type}_filter_state")
@property
def state(self):
"""Return filter life remaining in percent."""
return int(self._device.state.hepa_filter_state)
class DysonDustSensor(DysonSensor):
"""Representation of Dyson Dust sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Dust sensor."""
super().__init__(device, "dust")
@property
def state(self):
"""Return Dust value."""
return self._device.environmental_state.dust
class DysonHumiditySensor(DysonSensor):
"""Representation of Dyson Humidity sensor."""
def __init__(self, device):
"""Create a new Dyson Humidity sensor."""
super().__init__(device, "humidity")
@property
def state(self):
"""Return Humidity value."""
if self._device.environmental_state.humidity == 0:
return STATE_OFF
return self._device.environmental_state.humidity
class DysonTemperatureSensor(DysonSensor):
"""Representation of Dyson Temperature sensor."""
def __init__(self, device, unit):
"""Create a new Dyson Temperature sensor."""
super().__init__(device, "temperature")
self._unit = unit
@property
def state(self):
"""Return Temperature value."""
temperature_kelvin = self._device.environmental_state.temperature
if temperature_kelvin == 0:
return STATE_OFF
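# The device reports temperature in Kelvin; e.g. 295.15 K is shown as
# 22.0 when the unit is Celsius, or 71.6 for Fahrenheit.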
if self._unit == TEMP_CELSIUS:
return float(f"{(temperature_kelvin - 273.15):.1f}")
return float(f"{(temperature_kelvin * 9 / 5 - 459.67):.1f}")
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
class DysonAirQualitySensor(DysonSensor):
"""Representation of Dyson Air Quality sensor (lower is better)."""
def __init__(self, device):
"""Create a new Dyson Air Quality sensor."""
super().__init__(device, "air_quality")
@property
def state(self):
"""Return Air Quality value."""
return int(self._device.environmental_state.volatil_organic_compounds)
|
|
# Created by Pearu Peterson, September 2002
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test(<level>)'
Run tests if fftpack is not installed:
python tests/test_pseudo_diffs.py [<level>]
"""
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal)
from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
ihilbert, shift, fftfreq, cs_diff, sc_diff,
ss_diff, cc_diff)
import numpy as np
from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
from numpy.random import random
def direct_diff(x,k=1,period=None):
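# Reference implementation: differentiate by multiplying the Fourier
# coefficients by (2j*pi*freq/period)**k; for very long signals only the
# lowest frequency bins are kept, presumably to limit round-off noise.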
fx = fft(x)
n = len(fx)
if period is None:
period = 2*pi
w = fftfreq(n)*2j*pi/period*n
if k < 0:
w = 1 / w**k
w[0] = 0.0
else:
w = w**k
if n > 2000:
w[250:n-250] = 0.0
return ifft(w*fx).real
def direct_tilbert(x,h=1,period=None):
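# Reference implementation: the tilbert transform multiplies each Fourier
# coefficient by 1j/tanh(h*omega), with the zero-frequency term set to 0.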
fx = fft(x)
n = len(fx)
if period is None:
period = 2*pi
w = fftfreq(n)*h*2*pi/period*n
w[0] = 1
w = 1j/tanh(w)
w[0] = 0j
return ifft(w*fx)
def direct_itilbert(x,h=1,period=None):
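# Reference implementation: the inverse tilbert multiplies each Fourier
# coefficient by -1j*tanh(h*omega).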
fx = fft(x)
n = len(fx)
if period is None:
period = 2*pi
w = fftfreq(n)*h*2*pi/period*n
w = -1j*tanh(w)
return ifft(w*fx)
def direct_hilbert(x):
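# Reference implementation: the Hilbert transform multiplies each Fourier
# coefficient by 1j*sign(frequency).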
fx = fft(x)
n = len(fx)
w = fftfreq(n)*n
w = 1j*sign(w)
return ifft(w*fx)
def direct_ihilbert(x):
return -direct_hilbert(x)
def direct_shift(x,a,period=None):
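# Reference implementation: shifting by `a` applies the phase ramp
# exp(1j*omega*a) to the Fourier coefficients.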
n = len(x)
if period is None:
k = fftfreq(n)*1j*n
else:
k = fftfreq(n)*2j*pi/period*n
return ifft(fft(x)*exp(k*a)).real
class TestDiff(object):
def test_definition(self):
for n in [16,17,64,127,32]:
x = arange(n)*2*pi/n
assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
for k in range(5):
assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))
def test_period(self):
for n in [17,64]:
x = arange(n)/float(n)
assert_array_almost_equal(diff(sin(2*pi*x),period=1),
2*pi*cos(2*pi*x))
assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
-(2*pi)**3*cos(2*pi*x))
def test_sin(self):
for n in [32,64,77]:
x = arange(n)*2*pi/n
assert_array_almost_equal(diff(sin(x)),cos(x))
assert_array_almost_equal(diff(cos(x)),-sin(x))
assert_array_almost_equal(diff(sin(x),2),-sin(x))
assert_array_almost_equal(diff(sin(x),4),sin(x))
assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))
def test_expr(self):
for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
x = arange(n)*2*pi/n
f = sin(x)*cos(4*x)+exp(sin(3*x))
df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
- 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
d1 = diff(f)
assert_array_almost_equal(d1,df)
assert_array_almost_equal(diff(df),ddf)
assert_array_almost_equal(diff(f,2),ddf)
assert_array_almost_equal(diff(ddf,-1),df)
def test_expr_large(self):
for n in [2048,4096]:
x = arange(n)*2*pi/n
f = sin(x)*cos(4*x)+exp(sin(3*x))
df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
- 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
assert_array_almost_equal(diff(f),df)
assert_array_almost_equal(diff(df),ddf)
assert_array_almost_equal(diff(ddf,-1),df)
assert_array_almost_equal(diff(f,2),ddf)
def test_int(self):
n = 64
x = arange(n)*2*pi/n
assert_array_almost_equal(diff(sin(x),-1),-cos(x))
assert_array_almost_equal(diff(sin(x),-2),-sin(x))
assert_array_almost_equal(diff(sin(x),-4),sin(x))
assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))
def test_random_even(self):
for k in [0,2,4,6]:
for n in [60,32,64,56,55]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
# zeroing Nyquist mode:
f = diff(diff(f,1),-1)
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(diff(diff(f,k),-k),f)
assert_array_almost_equal(diff(diff(f,-k),k),f)
def test_random_odd(self):
for k in [0,1,2,3,4,5,6]:
for n in [33,65,55]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(diff(diff(f,k),-k),f)
assert_array_almost_equal(diff(diff(f,-k),k),f)
def test_zero_nyquist(self):
for k in [0,1,2,3,4,5,6]:
for n in [32,33,64,56,55]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
# zeroing Nyquist mode:
f = diff(diff(f,1),-1)
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(diff(diff(f,k),-k),f)
assert_array_almost_equal(diff(diff(f,-k),k),f)
class TestTilbert(object):
def test_definition(self):
for h in [0.1,0.5,1,5.5,10]:
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
y = tilbert(sin(x),h)
y1 = direct_tilbert(sin(x),h)
assert_array_almost_equal(y,y1)
assert_array_almost_equal(tilbert(sin(x),h),
direct_tilbert(sin(x),h))
assert_array_almost_equal(tilbert(sin(2*x),h),
direct_tilbert(sin(2*x),h))
def test_random_even(self):
for h in [0.1,0.5,1,5.5,10]:
for n in [32,64,56]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)
def test_random_odd(self):
for h in [0.1,0.5,1,5.5,10]:
for n in [33,65,55]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
class TestITilbert(object):
def test_definition(self):
for h in [0.1,0.5,1,5.5,10]:
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
y = itilbert(sin(x),h)
y1 = direct_itilbert(sin(x),h)
assert_array_almost_equal(y,y1)
assert_array_almost_equal(itilbert(sin(x),h),
direct_itilbert(sin(x),h))
assert_array_almost_equal(itilbert(sin(2*x),h),
direct_itilbert(sin(2*x),h))
class TestHilbert(object):
def test_definition(self):
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
y = hilbert(sin(x))
y1 = direct_hilbert(sin(x))
assert_array_almost_equal(y,y1)
assert_array_almost_equal(hilbert(sin(2*x)),
direct_hilbert(sin(2*x)))
def test_tilbert_relation(self):
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
f = sin(x)+cos(2*x)*sin(x)
y = hilbert(f)
y1 = direct_hilbert(f)
assert_array_almost_equal(y,y1)
y2 = tilbert(f,h=10)
assert_array_almost_equal(y,y2)
def test_random_odd(self):
for n in [33,65,55]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(ihilbert(hilbert(f)),f)
assert_array_almost_equal(hilbert(ihilbert(f)),f)
def test_random_even(self):
for n in [32,64,56]:
f = random((n,))
af = sum(f,axis=0)/n
f = f-af
# zeroing Nyquist mode:
f = diff(diff(f,1),-1)
assert_almost_equal(sum(f,axis=0),0.0)
assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
assert_array_almost_equal(hilbert(ihilbert(f)),f)
class TestIHilbert(object):
def test_definition(self):
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
y = ihilbert(sin(x))
y1 = direct_ihilbert(sin(x))
assert_array_almost_equal(y,y1)
assert_array_almost_equal(ihilbert(sin(2*x)),
direct_ihilbert(sin(2*x)))
def test_itilbert_relation(self):
for n in [16,17,64,127]:
x = arange(n)*2*pi/n
f = sin(x)+cos(2*x)*sin(x)
y = ihilbert(f)
y1 = direct_ihilbert(f)
assert_array_almost_equal(y,y1)
y2 = itilbert(f,h=10)
assert_array_almost_equal(y,y2)
class TestShift(object):
def test_definition(self):
for n in [18,17,64,127,32,2048,256]:
x = arange(n)*2*pi/n
for a in [0.1,3]:
assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))
assert_array_almost_equal(shift(sin(x),a),sin(x+a))
assert_array_almost_equal(shift(cos(x),a),cos(x+a))
assert_array_almost_equal(shift(cos(2*x)+sin(x),a),
cos(2*(x+a))+sin(x+a))
assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))
assert_array_almost_equal(shift(sin(x),2*pi),sin(x))
assert_array_almost_equal(shift(sin(x),pi),-sin(x))
assert_array_almost_equal(shift(sin(x),pi/2),cos(x))
class TestOverwrite(object):
"""Check input overwrite behavior """
real_dtypes = (np.float32, np.float64)
dtypes = real_dtypes + (np.complex64, np.complex128)
def _check(self, x, routine, *args, **kwargs):
x2 = x.copy()
routine(x2, *args, **kwargs)
sig = routine.__name__
if args:
sig += repr(args)
if kwargs:
sig += repr(kwargs)
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, *args, **kwargs):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
self._check(data, routine, *args, **kwargs)
def test_diff(self):
for dtype in self.dtypes:
self._check_1d(diff, dtype, (16,))
def test_tilbert(self):
for dtype in self.dtypes:
self._check_1d(tilbert, dtype, (16,), 1.6)
def test_itilbert(self):
for dtype in self.dtypes:
self._check_1d(itilbert, dtype, (16,), 1.6)
def test_hilbert(self):
for dtype in self.dtypes:
self._check_1d(hilbert, dtype, (16,))
def test_cs_diff(self):
for dtype in self.dtypes:
self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)
def test_sc_diff(self):
for dtype in self.dtypes:
self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)
def test_ss_diff(self):
for dtype in self.dtypes:
self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)
def test_cc_diff(self):
for dtype in self.dtypes:
self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)
def test_shift(self):
for dtype in self.dtypes:
self._check_1d(shift, dtype, (16,), 1.0)
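# Illustrative sketch only (not the direct_* reference implementations defined
# earlier in this file): a periodic shift of a signal sampled on x_j = 2*pi*j/n,
# as exercised by TestShift above, can be realised by multiplying the FFT
# coefficients by exp(1j*k*a). The names below are local to this demo.
def _demo_spectral_shift(n=64, a=0.3):
    import numpy as np
    x = np.arange(n)*2*np.pi/n
    f = np.sin(x) + np.cos(2*x)
    k = np.fft.fftfreq(n, d=1.0/n)  # signed integer wavenumbers
    shifted = np.fft.ifft(np.fft.fft(f)*np.exp(1j*k*a)).real
    # For a band-limited periodic signal this reproduces f(x + a) up to round-off.
    assert np.allclose(shifted, np.sin(x + a) + np.cos(2*(x + a)))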
|
|
import os
import logging
import sys
import argparse
import pprint
from urlparse import urljoin
import six
from flask import current_app
from itsdangerous import TimedSerializer, BadData
import requests
from bitcoinrpc.authproxy import JSONRPCException
from .coinserv_cmds import payout_many
from . import create_app, coinserv
logger = logging.getLogger("toroidal")
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
hdlr = logging.FileHandler('rpc.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
class RPCException(Exception):
pass
class RPCClient(object):
def __init__(self, config_path='/config.yml', root_suffix='/../',
max_age=5):
self.root = os.path.abspath(os.path.dirname(__file__) + root_suffix)
self.config = current_app.config
del current_app.logger.handlers[0]
current_app.logger.addHandler(ch)
self.serializer = TimedSerializer(self.config['rpc_signature'])
self.max_age = max_age
def post(self, url, *args, **kwargs):
if 'data' not in kwargs:
kwargs['data'] = ''
kwargs['data'] = self.serializer.dumps(kwargs['data'])
return self.remote(url, 'post', *args, **kwargs)
def get(self, url, *args, **kwargs):
return self.remote(url, 'get', *args, **kwargs)
def remote(self, url, method, max_age=None, **kwargs):
url = urljoin(self.config['rpc_url'], url)
ret = getattr(requests, method)(url, **kwargs)
if ret.status_code != 200:
raise RPCException("Non 200 from remote")
try:
logger.debug("Got {} from remote".format(ret.text))
return self.serializer.loads(ret.text, max_age or self.max_age)
except BadData:
raise RPCException("Invalid signature: {}".format(ret.text))
def poke_rpc(self):
try:
coinserv.getinfo()
except JSONRPCException:
raise RPCException("Coinserver not awake")
def reset_trans(self, pids, bids, simulate=False):
proc_pids = []
if pids:
proc_pids = [int(i) for i in pids.split(',')]
proc_bids = []
if bids:
proc_bids = [int(i) for i in bids.split(',')]
data = {'pids': proc_pids, 'bids': proc_bids, 'reset': True}
logger.info("Reseting requested bids and pids")
self.post('update_payouts', data=data)
def proc_trans(self, simulate=False):
self.poke_rpc()
lock = True
if simulate:
lock = False
payouts, bonus_payouts, lock_res = self.post('get_payouts',
data={'lock': lock})
if lock:
assert lock_res
pids = [t[2] for t in payouts]
bids = [t[2] for t in bonus_payouts]
logger.warn("Locked all recieved payout ids and bonus payout ids. In "
"the event of an error, run the following command to unlock"
"for a retried payout.\nsc_rpc reset_trans '{}' '{}'"
.format(",".join(str(p) for p in pids),
",".join(str(b) for b in bids)))
if not len(pids) and not len(bids):
logger.info("No payouts to process..")
return
logger.info("Recieved {} payouts and {} bonus payouts from the server"
.format(len(pids), len(bids)))
# builds two dictionaries, one that tracks the total payouts to a user,
# and another that tracks all the payout ids (pids) giving that amount
# to the user
totals = {}
pids = {}
bids = {}
for user, amount, id in payouts:
if user.startswith(current_app.config['payout_prefix']):
totals.setdefault(user, 0)
totals[user] += amount
pids.setdefault(user, [])
pids[user].append(id)
else:
logger.warn("User {} has been excluded due to invalid address"
.format(user))
for user, amount, id in bonus_payouts:
if user.startswith(current_app.config['payout_prefix']):
totals.setdefault(user, 0)
totals[user] += amount
bids.setdefault(user, [])
bids[user].append(id)
else:
logger.warn("User {} has been excluded due to invalid address"
.format(user))
# identify the users who meet minimum payout and format for sending
# to rpc
users = {user: amount / float(100000000) for user, amount in totals.iteritems()
if amount > current_app.config['minimum_payout']}
logger.info("Trying to payout a total of {}".format(sum(users.values())))
if len(users) == 0:
logger.info("Nobody has a big enough balance to pay out...")
return
# now we have all the users who we're going to send money. build a list
# of the pids that will be being paid in this transaction
committed_pids = []
for user in users:
committed_pids.extend(pids.get(user, []))
committed_bids = []
for user in users:
committed_bids.extend(bids.get(user, []))
logger.info("Total user payouts")
logger.info(pprint.pformat(users))
logger.info("Total bonus IDs")
logger.info(pprint.pformat(bids))
logger.info("Total payout IDs")
logger.info(pprint.pformat(pids))
logger.info("List of payout ids to be committed")
logger.info(committed_pids)
logger.info("List of bonus payout ids to be committed")
logger.info(committed_bids)
if simulate:
exit(0)
# now actually pay them
coin_txid = payout_many(users)
#coin_txid = "1111111111111111111111111111111111111111111111111111111111111111"
logger.info("Got {} as txid for payout!".format(coin_txid))
data = {'coin_txid': coin_txid, 'pids': committed_pids, 'bids': committed_bids}
logger.info("Sending data back to confirm_payouts: " + str(data))
while True:
try:
if self.post('update_payouts', data=data):
logger.info("Recieved success response from the server.")
break
else:
logger.error("Server returned failure response")
except Exception:
logger.error("Error recieved, press enter to retry",
exc_info=True)
raw_input()
def entry():
parser = argparse.ArgumentParser(prog='toro')
parser.add_argument('-l',
'--log-level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR'],
default='WARN')
parser.add_argument('-s',
'--simulate',
action='store_true',
default=False)
subparsers = parser.add_subparsers(title='main subcommands', dest='action')
subparsers.add_parser('proc_trans',
help='processes transactions locally by fetching '
'from a remote server')
reset = subparsers.add_parser('reset_trans',
help='resets the lock state of a set of pids'
' and bids')
reset.add_argument('pids')
reset.add_argument('bids')
args = parser.parse_args()
ch.setLevel(getattr(logging, args.log_level))
logger.setLevel(getattr(logging, args.log_level))
global_args = ['log_level', 'action']
    # Subcommand functions shouldn't receive arguments directed at the
    # global object/configs.
kwargs = {k: v for k, v in six.iteritems(vars(args)) if k not in global_args}
app = create_app()
with app.app_context():
interface = RPCClient()
try:
getattr(interface, args.action)(**kwargs)
except requests.exceptions.ConnectionError:
logger.error("Couldn't connect to remote server", exc_info=True)
except JSONRPCException as e:
logger.error("Recieved exception from rpc server: {}"
.format(getattr(e, 'error')))
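# Minimal sketch (not part of RPCClient) of the signing round trip the client
# relies on: payloads are serialized and signed with itsdangerous'
# TimedSerializer using the shared 'rpc_signature' secret, and stale or
# tampered payloads raise BadData on load. The secret and payload here are
# made-up illustration values.
def _demo_signed_payload():
    serializer = TimedSerializer('example-shared-secret')
    wire_data = serializer.dumps({'pids': [1, 2], 'bids': [], 'reset': True})
    try:
        return serializer.loads(wire_data, max_age=5)  # seconds
    except BadData:
        raise RPCException("Invalid signature: {}".format(wire_data))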
|
|
import linecache
import os.path
import re
import sys
import traceback # @Reimport
# IFDEF CYTHON
# import dis
# ENDIF
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_dont_trace
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_breakpoints import get_exception_breakpoint
from _pydevd_bundle.pydevd_comm_constants import (CMD_STEP_CAUGHT_EXCEPTION, CMD_STEP_RETURN, CMD_STEP_OVER, CMD_SET_BREAK, \
CMD_STEP_INTO, CMD_SMART_STEP_INTO, CMD_RUN_TO_LINE, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO_MY_CODE)
from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, get_current_thread_id, STATE_RUN, dict_iter_values, IS_PY3K, \
dict_keys, RETURN_VALUES_DICT, NO_FTRACE
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace
from _pydevd_bundle.pydevd_utils import get_clsname_for_code
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, is_real_file
try:
from inspect import CO_GENERATOR
except:
CO_GENERATOR = 0
from _pydevd_bundle.pydevd_constants import IS_PY2
try:
from _pydevd_bundle.pydevd_signature import send_signature_call_trace, send_signature_return_trace
except ImportError:
    # No-op fallbacks keep the module importable when pydevd_signature is unavailable.
    def send_signature_call_trace(*args, **kwargs):
        pass
    def send_signature_return_trace(*args, **kwargs):
        pass
basename = os.path.basename
IGNORE_EXCEPTION_TAG = re.compile('[^#]*#.*@IgnoreException')
DEBUG_START = ('pydevd.py', '_exec')
DEBUG_START_PY3K = ('_pydev_execfile.py', 'execfile')
TRACE_PROPERTY = 'pydevd_traceproperty.py'
get_file_type = DONT_TRACE.get
def handle_breakpoint_condition(py_db, info, breakpoint, new_frame):
condition = breakpoint.condition
try:
if breakpoint.handle_hit_condition(new_frame):
return True
if condition is None:
return False
return eval(condition, new_frame.f_globals, new_frame.f_locals)
except Exception as e:
if IS_PY2:
# Must be bytes on py2.
if isinstance(condition, unicode):
condition = condition.encode('utf-8')
if not isinstance(e, py_db.skip_print_breakpoint_exception):
sys.stderr.write('Error while evaluating expression: %s\n' % (condition,))
etype, value, tb = sys.exc_info()
traceback.print_exception(etype, value, tb.tb_next)
if not isinstance(e, py_db.skip_suspend_on_breakpoint_exception):
try:
# add exception_type and stacktrace into thread additional info
etype, value, tb = sys.exc_info()
error = ''.join(traceback.format_exception_only(etype, value))
stack = traceback.extract_stack(f=tb.tb_frame.f_back)
# On self.set_suspend(thread, CMD_SET_BREAK) this info will be
# sent to the client.
info.conditional_breakpoint_exception = \
('Condition:\n' + condition + '\n\nError:\n' + error, stack)
except:
traceback.print_exc()
return True
return False
finally:
etype, value, tb = None, None, None
def handle_breakpoint_expression(breakpoint, info, new_frame):
try:
try:
val = eval(breakpoint.expression, new_frame.f_globals, new_frame.f_locals)
except:
val = sys.exc_info()[1]
finally:
if val is not None:
info.pydev_message = str(val)
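# Standalone sketch (not part of pydevd) of the evaluation pattern used by
# handle_breakpoint_condition/handle_breakpoint_expression above: breakpoint
# conditions and expressions are plain Python expressions evaluated against
# the target frame's globals and locals.
def _demo_eval_condition(frame, condition):
    try:
        return bool(eval(condition, frame.f_globals, frame.f_locals))
    except Exception:
        # pydevd reports the failure and, unless configured otherwise, still suspends.
        return True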
#=======================================================================================================================
# PyDBFrame
#=======================================================================================================================
# IFDEF CYTHON
# cdef class PyDBFrame:
# ELSE
class PyDBFrame:
'''This makes the tracing for a given frame, so, the trace_dispatch
is used initially when we enter into a new context ('call') and then
is reused for the entire context.
'''
# ENDIF
# Note: class (and not instance) attributes.
    # Same mapping as in the main debugger, but this one only considers the file contents, while the one
    # in the main debugger considers the user input (so, the actual result must be a join of both).
filename_to_lines_where_exceptions_are_ignored = {}
filename_to_stat_info = {}
# IFDEF CYTHON
# cdef tuple _args
# cdef int should_skip
# cdef int _bytecode_offset
# cdef list _instructions
# def __init__(self, tuple args):
# self._args = args # In the cython version we don't need to pass the frame
# self.should_skip = -1 # On cythonized version, put in instance.
# self._bytecode_offset = 0
# self._instructions = None
# ELSE
should_skip = -1 # Default value in class (put in instance on set).
def __init__(self, args):
# args = main_debugger, filename, base, info, t, frame
        # yep, much faster than putting them in self and then getting them from self later on
self._args = args
self._bytecode_offset = 0
# ENDIF
def set_suspend(self, *args, **kwargs):
self._args[0].set_suspend(*args, **kwargs)
def do_wait_suspend(self, *args, **kwargs):
self._args[0].do_wait_suspend(*args, **kwargs)
# IFDEF CYTHON
# def trace_exception(self, frame, str event, arg):
# cdef bint should_stop;
# ELSE
def trace_exception(self, frame, event, arg):
# ENDIF
if event == 'exception':
should_stop, frame = self.should_stop_on_exception(frame, event, arg)
if should_stop:
self.handle_exception(frame, event, arg)
return self.trace_dispatch
return self.trace_exception
def trace_return(self, frame, event, arg):
if event == 'return':
main_debugger, filename = self._args[0], self._args[1]
send_signature_return_trace(main_debugger, frame, filename, arg)
return self.trace_return
# IFDEF CYTHON
# def should_stop_on_exception(self, frame, str event, arg):
# cdef PyDBAdditionalThreadInfo info;
# cdef bint flag;
# ELSE
def should_stop_on_exception(self, frame, event, arg):
# ENDIF
# main_debugger, _filename, info, _thread = self._args
main_debugger = self._args[0]
info = self._args[2]
should_stop = False
# STATE_SUSPEND = 2
if info.pydev_state != 2: # and breakpoint is not None:
exception, value, trace = arg
if trace is not None and hasattr(trace, 'tb_next'):
# on jython trace is None on the first event and it may not have a tb_next.
exception_breakpoint = get_exception_breakpoint(
exception, main_debugger.break_on_caught_exceptions)
is_real = is_real_file(frame.f_code.co_filename)
if exception_breakpoint is not None:
if exception_breakpoint.condition is not None:
# Always add exception to frame (must remove later after we proceed).
add_exception_to_frame(frame, (exception, value, trace))
eval_result = handle_breakpoint_condition(main_debugger, info, exception_breakpoint, frame)
remove_exception_from_frame(frame)
if not eval_result:
return False, frame
if exception_breakpoint.ignore_libraries:
if not main_debugger.is_exception_trace_in_project_scope(trace):
return False, frame
if ignore_exception_trace(trace):
return False, frame
was_just_raised = just_raised(trace)
if was_just_raised:
if main_debugger.skip_on_exceptions_thrown_in_same_context:
# Option: Don't break if an exception is caught in the same function from which it is thrown
return False, frame
if exception_breakpoint.notify_on_first_raise_only:
if main_debugger.skip_on_exceptions_thrown_in_same_context:
# In this case we never stop if it was just raised, so, to know if it was the first we
# need to check if we're in the 2nd method.
if not was_just_raised and not just_raised(trace.tb_next):
return False, frame # I.e.: we stop only when we're at the caller of a method that throws an exception
else:
if not was_just_raised and not main_debugger.is_top_level_trace_in_project_scope(trace):
return False, frame # I.e.: we stop only when it was just raised
# If it got here we should stop.
should_stop = True
try:
info.pydev_message = exception_breakpoint.qname
except:
info.pydev_message = exception_breakpoint.qname.encode('utf-8')
# Always add exception to frame (must remove later after we proceed).
add_exception_to_frame(frame, (exception, value, trace))
info.pydev_message = "python-%s" % info.pydev_message
else:
# No regular exception breakpoint, let's see if some plugin handles it.
try:
if main_debugger.plugin is not None:
result = main_debugger.plugin.exception_break(main_debugger, self, frame, self._args, arg)
if result:
should_stop, frame = result
except:
should_stop = False
if should_stop:
if exception_breakpoint is not None and exception_breakpoint.expression is not None:
handle_breakpoint_expression(exception_breakpoint, info, frame)
return should_stop, frame
def handle_exception(self, frame, event, arg):
try:
# We have 3 things in arg: exception type, description, traceback object
trace_obj = arg[2]
main_debugger = self._args[0]
initial_trace_obj = trace_obj
if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
# I.e.: tb_next should be only None in the context it was thrown (trace_obj.tb_frame is frame is just a double check).
pass
else:
# Get the trace_obj from where the exception was raised...
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
if main_debugger.ignore_exceptions_thrown_in_lines_with_ignore_exception:
for check_trace_obj in (initial_trace_obj, trace_obj):
filename = get_abs_path_real_path_and_base_from_frame(check_trace_obj.tb_frame)[1]
filename_to_lines_where_exceptions_are_ignored = self.filename_to_lines_where_exceptions_are_ignored
lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(filename)
if lines_ignored is None:
lines_ignored = filename_to_lines_where_exceptions_are_ignored[filename] = {}
try:
curr_stat = os.stat(filename)
curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
except:
curr_stat = None
last_stat = self.filename_to_stat_info.get(filename)
if last_stat != curr_stat:
self.filename_to_stat_info[filename] = curr_stat
lines_ignored.clear()
try:
linecache.checkcache(filename)
except:
# Jython 2.1
linecache.checkcache()
from_user_input = main_debugger.filename_to_lines_where_exceptions_are_ignored.get(filename)
if from_user_input:
merged = {}
merged.update(lines_ignored)
# Override what we have with the related entries that the user entered
merged.update(from_user_input)
else:
merged = lines_ignored
exc_lineno = check_trace_obj.tb_lineno
# print ('lines ignored', lines_ignored)
# print ('user input', from_user_input)
# print ('merged', merged, 'curr', exc_lineno)
if exc_lineno not in merged: # Note: check on merged but update lines_ignored.
try:
line = linecache.getline(filename, exc_lineno, check_trace_obj.tb_frame.f_globals)
except:
# Jython 2.1
line = linecache.getline(filename, exc_lineno)
if IGNORE_EXCEPTION_TAG.match(line) is not None:
lines_ignored[exc_lineno] = 1
return
else:
# Put in the cache saying not to ignore
lines_ignored[exc_lineno] = 0
else:
# Ok, dict has it already cached, so, let's check it...
if merged.get(exc_lineno, 0):
return
thread = self._args[3]
try:
frame_id_to_frame = {}
frame_id_to_frame[id(frame)] = frame
f = trace_obj.tb_frame
while f is not None:
frame_id_to_frame[id(f)] = f
f = f.f_back
f = None
thread_id = get_current_thread_id(thread)
pydevd_vars.add_additional_frame_by_id(thread_id, frame_id_to_frame)
try:
main_debugger.send_caught_exception_stack(thread, arg, id(frame))
self.set_suspend(thread, CMD_STEP_CAUGHT_EXCEPTION)
self.do_wait_suspend(thread, frame, event, arg)
main_debugger.send_caught_exception_stack_proceeded(thread)
finally:
pydevd_vars.remove_additional_frame_by_id(thread_id)
except KeyboardInterrupt as e:
raise e
except:
traceback.print_exc()
main_debugger.set_trace_for_frame_and_parents(frame)
finally:
# Make sure the user cannot see the '__exception__' we added after we leave the suspend state.
remove_exception_from_frame(frame)
# Clear some local variables...
frame = None
trace_obj = None
initial_trace_obj = None
check_trace_obj = None
f = None
frame_id_to_frame = None
main_debugger = None
thread = None
def get_func_name(self, frame):
code_obj = frame.f_code
func_name = code_obj.co_name
try:
cls_name = get_clsname_for_code(code_obj, frame)
if cls_name is not None:
return "%s.%s" % (cls_name, func_name)
else:
return func_name
except:
traceback.print_exc()
return func_name
def manage_return_values(self, main_debugger, frame, event, arg):
def get_func_name(frame):
code_obj = frame.f_code
func_name = code_obj.co_name
try:
cls_name = get_clsname_for_code(code_obj, frame)
if cls_name is not None:
return "%s.%s" % (cls_name, func_name)
else:
return func_name
except:
traceback.print_exc()
return func_name
try:
if main_debugger.show_return_values:
if event == "return" and hasattr(frame, "f_code") and hasattr(frame.f_code, "co_name"):
if hasattr(frame, "f_back") and hasattr(frame.f_back, "f_locals"):
if RETURN_VALUES_DICT not in dict_keys(frame.f_back.f_locals):
frame.f_back.f_locals[RETURN_VALUES_DICT] = {}
name = get_func_name(frame)
frame.f_back.f_locals[RETURN_VALUES_DICT][name] = arg
if main_debugger.remove_return_values_flag:
# Showing return values was turned off, we should remove them from locals dict.
# The values can be in the current frame or in the back one
if RETURN_VALUES_DICT in dict_keys(frame.f_locals):
frame.f_locals.pop(RETURN_VALUES_DICT)
if hasattr(frame, "f_back") and hasattr(frame.f_back, "f_locals"):
if RETURN_VALUES_DICT in dict_keys(frame.f_back.f_locals):
frame.f_back.f_locals.pop(RETURN_VALUES_DICT)
main_debugger.remove_return_values_flag = False
except:
main_debugger.remove_return_values_flag = False
traceback.print_exc()
def clear_run_state(self, info):
info.pydev_step_stop = None
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
# IFDEF CYTHON
# cpdef trace_dispatch(self, frame, str event, arg):
# cdef str filename;
# cdef bint is_exception_event;
# cdef bint has_exception_breakpoints;
# cdef bint can_skip;
# cdef PyDBAdditionalThreadInfo info;
# cdef int step_cmd;
# cdef int line;
# cdef bint is_line;
# cdef bint is_call;
# cdef bint is_return;
# cdef bint should_stop;
# cdef dict breakpoints_for_file;
# cdef str curr_func_name;
# cdef bint exist_result;
# cdef dict frame_skips_cache;
# cdef tuple frame_cache_key;
# cdef tuple line_cache_key;
# cdef int breakpoints_in_line_cache;
# cdef int breakpoints_in_frame_cache;
# cdef bint has_breakpoint_in_frame;
# cdef bint need_trace_return;
# ELSE
def trace_dispatch(self, frame, event, arg):
# ENDIF
main_debugger, filename, info, thread, frame_skips_cache, frame_cache_key = self._args
# print('frame trace_dispatch %s %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, info.pydev_step_cmd))
# The thread can be already suspended by another function, e.g. built-in breakpoint hook.
if info.is_tracing:
return None
try:
info.is_tracing = True
line = frame.f_lineno
line_cache_key = (frame_cache_key, line)
if main_debugger._finish_debugging_session:
if event != 'call': frame.f_trace = NO_FTRACE
return None
# IFDEF CYTHON
# if event == 'opcode':
# instructions = self._get_instructions(frame)
# for i, inst in enumerate(instructions):
# if inst.offset == frame.f_lasti:
# opname, arg, argval = inst.opname, inst.arg, str(inst.argval)
# print('frame trace_dispatch %s %s %s %s %s %s %s %s' % (frame.f_lineno, frame.f_lasti, frame.f_code.co_name,
# frame.f_code.co_filename, event, opname, arg, argval))
# try:
# self._bytecode_offset = instructions[i + 1].offset
# except IndexError:
# break
# return self.trace_dispatch
# ENDIF
plugin_manager = main_debugger.plugin
is_exception_event = event == 'exception'
has_exception_breakpoints = main_debugger.break_on_caught_exceptions or main_debugger.has_plugin_exception_breaks
if is_exception_event:
if has_exception_breakpoints:
should_stop, frame = self.should_stop_on_exception(frame, event, arg)
if should_stop:
self.handle_exception(frame, event, arg)
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
is_line = False
is_return = False
is_call = False
else:
is_line = event == 'line'
is_return = event == 'return'
is_call = event == 'call'
if not is_line and not is_return and not is_call:
# Unexpected: just keep the same trace func.
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
need_signature_trace_return = False
if main_debugger.signature_factory is not None:
if is_call:
need_signature_trace_return = send_signature_call_trace(main_debugger, frame, filename)
elif is_return:
send_signature_return_trace(main_debugger, frame, filename, arg)
stop_frame = info.pydev_step_stop
step_cmd = info.pydev_step_cmd
if is_exception_event:
breakpoints_for_file = None
# CMD_STEP_OVER = 108
if stop_frame and stop_frame is not frame and step_cmd == 108 and \
arg[0] in (StopIteration, GeneratorExit) and arg[2] is None:
info.pydev_step_cmd = 107 # CMD_STEP_INTO = 107
info.pydev_step_stop = None
else:
# If we are in single step mode and something causes us to exit the current frame, we need to make sure we break
# eventually. Force the step mode to step into and the step stop frame to None.
# I.e.: F6 in the end of a function should stop in the next possible position (instead of forcing the user
# to make a step in or step over at that location).
# Note: this is especially troublesome when we're skipping code with the
# @DontTrace comment.
if stop_frame is frame and is_return and step_cmd in (109, 108): # CMD_STEP_RETURN = 109, CMD_STEP_OVER = 108
if not frame.f_code.co_flags & 0x20: # CO_GENERATOR = 0x20 (inspect.CO_GENERATOR)
info.pydev_step_cmd = 107 # CMD_STEP_INTO = 107
info.pydev_step_stop = None
breakpoints_for_file = main_debugger.breakpoints.get(filename)
can_skip = False
if info.pydev_state == 1: # STATE_RUN = 1
# we can skip if:
# - we have no stop marked
# - we should make a step return/step over and we're not in the current frame
# CMD_STEP_RETURN = 109, CMD_STEP_OVER = 108
can_skip = (step_cmd == -1 and stop_frame is None) \
or (step_cmd in (109, 108) and stop_frame is not frame)
if can_skip:
if plugin_manager is not None and main_debugger.has_plugin_line_breaks:
can_skip = not plugin_manager.can_not_skip(main_debugger, self, frame, info)
# CMD_STEP_OVER = 108
if can_skip and main_debugger.show_return_values and info.pydev_step_cmd == 108 and frame.f_back is info.pydev_step_stop:
# trace function for showing return values after step over
can_skip = False
# Let's check to see if we are in a function that has a breakpoint. If we don't have a breakpoint,
# we will return nothing for the next trace
# also, after we hit a breakpoint and go to some other debugging state, we have to force the set trace anyway,
# so, that's why the additional checks are there.
if not breakpoints_for_file:
if can_skip:
if has_exception_breakpoints:
frame.f_trace = self.trace_exception
return self.trace_exception
else:
if need_signature_trace_return:
frame.f_trace = self.trace_return
return self.trace_return
else:
if not is_call: frame.f_trace = NO_FTRACE
return None
else:
# When cached, 0 means we don't have a breakpoint and 1 means we have.
if can_skip:
breakpoints_in_line_cache = frame_skips_cache.get(line_cache_key, -1)
if breakpoints_in_line_cache == 0:
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
breakpoints_in_frame_cache = frame_skips_cache.get(frame_cache_key, -1)
if breakpoints_in_frame_cache != -1:
# Gotten from cache.
has_breakpoint_in_frame = breakpoints_in_frame_cache == 1
else:
has_breakpoint_in_frame = False
# Checks the breakpoint to see if there is a context match in some function
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>', '<lambda>'):
curr_func_name = ''
for breakpoint in dict_iter_values(breakpoints_for_file): # jython does not support itervalues()
# will match either global or some function
if breakpoint.func_name in ('None', curr_func_name):
has_breakpoint_in_frame = True
break
# Cache the value (1 or 0 or -1 for default because of cython).
if has_breakpoint_in_frame:
frame_skips_cache[frame_cache_key] = 1
else:
frame_skips_cache[frame_cache_key] = 0
if can_skip and not has_breakpoint_in_frame:
if has_exception_breakpoints:
frame.f_trace = self.trace_exception
return self.trace_exception
else:
if need_signature_trace_return:
frame.f_trace = self.trace_return
return self.trace_return
else:
if not is_call: frame.f_trace = NO_FTRACE
return None
# We may have hit a breakpoint or we are already in step mode. Either way, let's check what we should do in this frame
# print('NOT skipped: %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, event, frame.__class__.__name__))
try:
flag = False
# return is not taken into account for breakpoint hit because we'd have a double-hit in this case
# (one for the line and the other for the return).
stop_info = {}
breakpoint = None
exist_result = False
stop = False
bp_type = None
if not is_return and info.pydev_state != STATE_SUSPEND and breakpoints_for_file is not None and line in breakpoints_for_file:
breakpoint = breakpoints_for_file[line]
new_frame = frame
stop = True
if step_cmd == CMD_STEP_OVER and stop_frame is frame and (is_line or is_return):
stop = False # we don't stop on breakpoint if we have to stop by step-over (it will be processed later)
elif plugin_manager is not None and main_debugger.has_plugin_line_breaks:
result = plugin_manager.get_breakpoint(main_debugger, self, frame, event, self._args)
if result:
exist_result = True
flag, breakpoint, new_frame, bp_type = result
if breakpoint:
                    # OK, we hit a breakpoint; now we have to discover whether it is a conditional
                    # breakpoint and evaluate its condition/expression here.
if stop or exist_result:
eval_result = False
if breakpoint.has_condition:
eval_result = handle_breakpoint_condition(main_debugger, info, breakpoint, new_frame)
if breakpoint.expression is not None:
handle_breakpoint_expression(breakpoint, info, new_frame)
if breakpoint.is_logpoint and info.pydev_message is not None and len(info.pydev_message) > 0:
cmd = main_debugger.cmd_factory.make_io_message(info.pydev_message + os.linesep, '1')
main_debugger.writer.add_command(cmd)
if breakpoint.has_condition and not eval_result:
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
if is_call and frame.f_code.co_name in ('<module>', '<lambda>'):
# If we find a call for a module, it means that the module is being imported/executed for the
                        # first time. In this case we have to ignore this hit as it may later be duplicated by a
# line event at the same place (so, if there's a module with a print() in the first line
# the user will hit that line twice, which is not what we want).
#
# As for lambda, as it only has a single statement, it's not interesting to trace
# its call and later its line event as they're usually in the same line.
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
else:
# if the frame is traced after breakpoint stop,
# but the file should be ignored while stepping because of filters
if step_cmd != -1:
if main_debugger.is_filter_enabled and main_debugger.is_ignored_by_filters(filename):
# ignore files matching stepping filters
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
if main_debugger.is_filter_libraries and not main_debugger.in_project_scope(filename):
# ignore library files while stepping
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
if main_debugger.show_return_values or main_debugger.remove_return_values_flag:
self.manage_return_values(main_debugger, frame, event, arg)
if stop:
self.set_suspend(
thread,
CMD_SET_BREAK,
suspend_other_threads=breakpoint and breakpoint.suspend_policy == "ALL",
)
elif flag and plugin_manager is not None:
result = plugin_manager.suspend(main_debugger, thread, frame, bp_type)
if result:
frame = result
# if thread has a suspend flag, we suspend with a busy wait
if info.pydev_state == STATE_SUSPEND:
self.do_wait_suspend(thread, frame, event, arg)
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
else:
if not breakpoint and is_line:
# No stop from anyone and no breakpoint found in line (cache that).
frame_skips_cache[line_cache_key] = 0
except KeyboardInterrupt:
self.clear_run_state(info)
raise
except:
traceback.print_exc()
raise
# step handling. We stop when we hit the right frame
try:
should_skip = 0
if pydevd_dont_trace.should_trace_hook is not None:
if self.should_skip == -1:
# I.e.: cache the result on self.should_skip (no need to evaluate the same frame multiple times).
                    # Note that on a code reload we won't re-evaluate this because, in practice, the frame.f_code
                    # handled by this frame is read-only, so we can cache it safely.
if not pydevd_dont_trace.should_trace_hook(frame, filename):
# -1, 0, 1 to be Cython-friendly
should_skip = self.should_skip = 1
else:
should_skip = self.should_skip = 0
else:
should_skip = self.should_skip
plugin_stop = False
if should_skip:
stop = False
elif step_cmd == CMD_STEP_INTO:
stop = is_line or is_return
if plugin_manager is not None:
result = plugin_manager.cmd_step_into(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result
elif step_cmd == CMD_STEP_INTO_MY_CODE:
if main_debugger.in_project_scope(frame.f_code.co_filename):
stop = is_line
elif step_cmd == CMD_STEP_OVER:
stop = stop_frame is frame and (is_line or is_return)
if frame.f_code.co_flags & CO_GENERATOR:
if is_return:
stop = False
if plugin_manager is not None:
result = plugin_manager.cmd_step_over(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result
elif step_cmd == CMD_SMART_STEP_INTO:
stop = False
if info.pydev_smart_step_stop is frame:
info.pydev_func_name = '.invalid.' # Must match the type in cython
info.pydev_smart_step_stop = None
if is_line or is_exception_event:
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>') or curr_func_name is None:
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
stop = True
elif step_cmd == CMD_STEP_RETURN:
stop = is_return and stop_frame is frame
else:
stop = False
if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"):
f_code = getattr(frame.f_back, 'f_code', None)
if f_code is not None:
back_filename = os.path.basename(f_code.co_filename)
file_type = get_file_type(back_filename)
if file_type == PYDEV_FILE:
stop = False
if plugin_stop:
stopped_on_plugin = plugin_manager.stop(main_debugger, frame, event, self._args, stop_info, arg, step_cmd)
elif stop:
if is_line:
self.set_suspend(thread, step_cmd)
self.do_wait_suspend(thread, frame, event, arg)
else: # return event
back = frame.f_back
if back is not None:
# When we get to the pydevd run function, the debugging has actually finished for the main thread
# (note that it can still go on for other threads, but for this one, we just make it finish)
# So, just setting it to None should be OK
_, back_filename, base = get_abs_path_real_path_and_base_from_frame(back)
if (base, back.f_code.co_name) in (DEBUG_START, DEBUG_START_PY3K):
back = None
elif base == TRACE_PROPERTY:
                            # We don't want to trace the return event of pydevd_traceproperty (custom property for debugging).
# if we're in a return, we want it to appear to the user in the previous frame!
if not is_call: frame.f_trace = NO_FTRACE
return None
elif pydevd_dont_trace.should_trace_hook is not None:
if not pydevd_dont_trace.should_trace_hook(back, back_filename):
# In this case, we'll have to skip the previous one because it shouldn't be traced.
# Also, we have to reset the tracing, because if the parent's parent (or some
# other parent) has to be traced and it's not currently, we wouldn't stop where
# we should anymore (so, a step in/over/return may not stop anywhere if no parent is traced).
# Related test: _debugger_case17a.py
main_debugger.set_trace_for_frame_and_parents(back)
if not is_call: frame.f_trace = NO_FTRACE
return None
if back is not None:
# if we're in a return, we want it to appear to the user in the previous frame!
self.set_suspend(thread, step_cmd)
self.do_wait_suspend(thread, back, event, arg)
else:
# in jython we may not have a back frame
self.clear_run_state(info)
except KeyboardInterrupt:
self.clear_run_state(info)
raise
except:
try:
traceback.print_exc()
info.pydev_step_cmd = -1
except:
if not is_call: frame.f_trace = NO_FTRACE
return None
# if we are quitting, let's stop the tracing
if not main_debugger.quitting:
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_dispatch
else:
if not is_call: frame.f_trace = NO_FTRACE
return None
finally:
info.is_tracing = False
# end trace_dispatch
# IFDEF CYTHON
# cdef _get_instructions(self, frame):
# if self._instructions is None:
# self._instructions = list(dis.get_instructions(frame.f_code))
# return self._instructions
# ENDIF
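# Standalone sketch (not used by PyDBFrame) of the sys.settrace protocol that
# trace_dispatch above implements: a frame trace function receives 'call',
# 'line', 'return' and 'exception' events and returns the callable to use for
# subsequent events in that frame, or None to stop tracing it.
def _demo_trace(frame, event, arg):
    if event == 'line':
        sys.stderr.write('line %s in %s\n' % (frame.f_lineno, frame.f_code.co_name))
    elif event == 'exception':
        # arg is the (exception_type, value, traceback) triple, exactly what
        # should_stop_on_exception above receives.
        pass
    # Returning the tracer keeps tracing this frame; returning None (or setting
    # frame.f_trace = NO_FTRACE, as done above) stops per-line tracing for it.
    return _demo_trace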
|
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from builtins import object
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
PROD = 'prod'
DEV = 'dev'
PEER = 'peer'
BUNDLE = 'bundle'
OPTIONAL = 'optional'
NO_SAVE = 'not saved'
class PackageInstallationVersionOption(object):
EXACT = 'exact'
TILDE = 'tilde'
class PackageManager(object):
"""Defines node package manager functionalities."""
def __init__(self, name, tool_installations):
self.name = name
self.tool_installations = tool_installations
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
"""Returns command line args for installing package.
:param install_optional: True to request install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:param frozen_lockfile: True to disallow automatic update of lock files.
:rtype: list of strings
"""
raise NotImplementedError
def _get_run_script_args(self):
"""Returns command line args to run a package.json script.
:rtype: list of strings
"""
raise NotImplementedError
def _get_add_package_args(self, package, type_option, version_option):
"""Returns command line args to add a node pacakge.
:rtype: list of strings
"""
raise NotImplementedError()
def run_command(self, args=None, node_paths=None):
"""Returns a command that when executed will run an arbitury command via package manager."""
return command_gen(
self.tool_installations,
self.name,
args=args,
node_paths=node_paths
)
def install_module(
self,
install_optional=False,
production_only=False,
force=False,
frozen_lockfile=True,
node_paths=None):
"""Returns a command that when executed will install node package.
:param install_optional: True to install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:param frozen_lockfile: True to disallow automatic update of lock files.
    :param node_paths: A list of paths that should be included in $PATH when
      running the installation.
"""
args=self._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force,
frozen_lockfile=frozen_lockfile)
return self.run_command(args=args, node_paths=node_paths)
def run_script(self, script_name, script_args=None, node_paths=None):
"""Returns a command to execute a package.json script.
    :param script_name: Name of the script to run. Note that the script name 'test'
      can be used to run node tests.
:param script_args: Args to be passed to package.json script.
    :param node_paths: A list of paths that should be included in $PATH when
      running the script.
"""
    # TODO: consider adding a pants.util function to manipulate the command line.
package_manager_args = self._get_run_script_args()
package_manager_args.append(script_name)
if script_args:
package_manager_args.append('--')
package_manager_args.extend(script_args)
return self.run_command(args=package_manager_args, node_paths=node_paths)
def add_package(
self,
package,
node_paths=None,
type_option=PackageInstallationTypeOption.PROD,
version_option=None):
"""Returns a command that when executed will add a node package to current node module.
:param package: string. A valid npm/yarn package description. The accepted forms are
package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
https://url/to.tgz
    :param node_paths: A list of paths that should be included in $PATH when
      running the script.
    :param type_option: A value from PackageInstallationTypeOption that indicates the type
      of package to be installed. Defaults to 'prod', which is a production dependency.
    :param version_option: A value from PackageInstallationVersionOption that indicates how
      to match the version. Defaults to None, which uses the package manager's default.
"""
args=self._get_add_package_args(
package,
type_option=type_option,
version_option=version_option)
return self.run_command(args=args, node_paths=node_paths)
def run_cli(self, cli, args=None, node_paths=None):
"""Returns a command that when executed will run an installed cli via package manager."""
cli_args = [cli]
if args:
cli_args.append('--')
cli_args.extend(args)
return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
def _get_run_script_args(self):
return ['run']
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
return_args = ['--non-interactive']
if not install_optional:
return_args.append('--ignore-optional')
if production_only:
return_args.append('--production=true')
if force:
return_args.append('--force')
if frozen_lockfile:
return_args.append('--frozen-lockfile')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['add', package]
package_type_option = {
      PackageInstallationTypeOption.PROD: '',  # Yarn saves production dependencies by default.
PackageInstallationTypeOption.DEV: '--dev',
PackageInstallationTypeOption.PEER: '--peer',
PackageInstallationTypeOption.OPTIONAL: '--optional',
PackageInstallationTypeOption.BUNDLE: None,
PackageInstallationTypeOption.NO_SAVE: None,
}.get(type_option)
if package_type_option is None:
LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--exact',
PackageInstallationVersionOption.TILDE: '--tilde',
}.get(version_option)
if package_version_option is None:
LOG.warning(
        '{} does not support installing with the {} version option, ignored.'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
class PackageManagerNpm(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
def _get_run_script_args(self):
return ['run-script']
def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):
return_args = ['install']
if not install_optional:
return_args.append('--no-optional')
if production_only:
return_args.append('--production')
if force:
return_args.append('--force')
if frozen_lockfile:
LOG.warning('{} does not support frozen lockfile option. Ignored.'.format(self.name))
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['install', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '--save-prod',
PackageInstallationTypeOption.DEV: '--save-dev',
PackageInstallationTypeOption.PEER: None,
PackageInstallationTypeOption.OPTIONAL: '--save-optional',
PackageInstallationTypeOption.BUNDLE: '--save-bundle',
PackageInstallationTypeOption.NO_SAVE: '--no-save',
}.get(type_option)
if package_type_option is None:
LOG.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--save-exact',
PackageInstallationVersionOption.TILDE: None,
}.get(version_option)
if package_version_option is None:
LOG.warning(
        '{} does not support installing with the {} version option, ignored.'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
def run_cli(self, cli, args=None, node_paths=None):
    raise RuntimeError('npm does not support running a CLI directly. Please use Yarn instead.')
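# Illustrative sketch of the flag-mapping convention shared by both package
# managers above: None means "unsupported, warn and skip", '' means "manager
# default, emit nothing", anything else is appended verbatim. The mapping below
# uses the yarn flags as an example; the helper itself is not part of pants.
def _demo_type_option_flag(type_option):
  flag = {
    PackageInstallationTypeOption.PROD: '',
    PackageInstallationTypeOption.DEV: '--dev',
    PackageInstallationTypeOption.BUNDLE: None,
  }.get(type_option)
  if flag is None:
    LOG.warning('{} packages are not supported here, ignored.'.format(type_option))
    return []
  return [flag] if flag else []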
|
|
from sympy import (symbols, Symbol, nan, oo, zoo, I, sinh, sin, pi, atan,
acos, Rational, sqrt, asin, acot, coth, E, S, tan, tanh, cos,
cosh, atan2, exp, log, asinh, acoth, atanh, O, cancel, Matrix, re, im,
Float, Pow, gcd, sec, csc, cot, diff, simplify, Heaviside, arg,
conjugate, series, FiniteSet, asec, acsc)
from sympy.utilities.pytest import XFAIL, slow, raises
from sympy.core.compatibility import xrange
x, y, z = symbols('x y z')
r = Symbol('r', real=True)
k = Symbol('k', integer=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
def test_sin():
x, y = symbols('x y')
assert sin.nargs == FiniteSet(1)
assert sin(nan) == nan
assert sin(oo*I) == oo*I
assert sin(-oo*I) == -oo*I
assert sin(oo).args[0] == oo
assert sin(0) == 0
assert sin(asin(x)) == x
assert sin(atan(x)) == x / sqrt(1 + x**2)
assert sin(acos(x)) == sqrt(1 - x**2)
assert sin(acot(x)) == 1 / (sqrt(1 + 1 / x**2) * x)
assert sin(atan2(y, x)) == y / sqrt(x**2 + y**2)
assert sin(pi*I) == sinh(pi)*I
assert sin(-pi*I) == -sinh(pi)*I
assert sin(-2*I) == -sinh(2)*I
assert sin(pi) == 0
assert sin(-pi) == 0
assert sin(2*pi) == 0
assert sin(-2*pi) == 0
assert sin(-3*10**73*pi) == 0
assert sin(7*10**103*pi) == 0
assert sin(pi/2) == 1
assert sin(-pi/2) == -1
assert sin(5*pi/2) == 1
assert sin(7*pi/2) == -1
n = symbols('n', integer=True)
assert sin(pi*n/2) == (-1)**(n/2 - S.Half)
assert sin(pi/3) == S.Half*sqrt(3)
assert sin(-2*pi/3) == -S.Half*sqrt(3)
assert sin(pi/4) == S.Half*sqrt(2)
assert sin(-pi/4) == -S.Half*sqrt(2)
assert sin(17*pi/4) == S.Half*sqrt(2)
assert sin(-3*pi/4) == -S.Half*sqrt(2)
assert sin(pi/6) == S.Half
assert sin(-pi/6) == -S.Half
assert sin(7*pi/6) == -S.Half
assert sin(-5*pi/6) == -S.Half
assert sin(1*pi/5) == sqrt((5 - sqrt(5)) / 8)
assert sin(2*pi/5) == sqrt((5 + sqrt(5)) / 8)
assert sin(3*pi/5) == sin(2*pi/5)
assert sin(4*pi/5) == sin(1*pi/5)
assert sin(6*pi/5) == -sin(1*pi/5)
assert sin(8*pi/5) == -sin(2*pi/5)
assert sin(-1273*pi/5) == -sin(2*pi/5)
assert sin(pi/8) == sqrt((2 - sqrt(2))/4)
assert sin(104*pi/105) == sin(pi/105)
assert sin(106*pi/105) == -sin(pi/105)
assert sin(-104*pi/105) == -sin(pi/105)
assert sin(-106*pi/105) == sin(pi/105)
assert sin(x*I) == sinh(x)*I
assert sin(k*pi) == 0
assert sin(17*k*pi) == 0
assert sin(k*pi*I) == sinh(k*pi)*I
assert sin(r).is_real is True
assert isinstance(sin( re(x) - im(y)), sin) is True
assert isinstance(sin(-re(x) + im(y)), sin) is False
for d in list(range(1, 22)) + [60, 85]:
for n in xrange(0, d*2 + 1):
x = n*pi/d
e = abs( float(sin(x)) - sin(float(x)) )
assert e < 1e-12
def test_sin_cos():
for d in [1, 2, 3, 4, 5, 6, 10, 12]: # list is not exhaustive...
for n in xrange(-2*d, d*2):
x = n*pi/d
assert sin(x + pi/2) == cos(x), "fails for %d*pi/%d" % (n, d)
assert sin(x - pi/2) == -cos(x), "fails for %d*pi/%d" % (n, d)
assert sin(x) == cos(x - pi/2), "fails for %d*pi/%d" % (n, d)
assert -sin(x) == cos(x + pi/2), "fails for %d*pi/%d" % (n, d)
def test_sin_series():
assert sin(x).series(x, 0, 9) == \
x - x**3/6 + x**5/120 - x**7/5040 + O(x**9)
def test_sin_rewrite():
assert sin(x).rewrite(exp) == -I*(exp(I*x) - exp(-I*x))/2
assert sin(x).rewrite(tan) == 2*tan(x/2)/(1 + tan(x/2)**2)
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert sin(sinh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sinh(3)).n()
assert sin(cosh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cosh(3)).n()
assert sin(tanh(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tanh(3)).n()
assert sin(coth(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, coth(3)).n()
assert sin(sin(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, sin(3)).n()
assert sin(cos(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cos(3)).n()
assert sin(tan(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, tan(3)).n()
assert sin(cot(x)).rewrite(
exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cot(3)).n()
assert sin(log(x)).rewrite(Pow) == I*x**-I / 2 - I*x**I /2
assert sin(x).rewrite(csc) == 1/csc(x)
def test_sin_expansion():
# Note: these formulas are not unique. The ones here come from the
# Chebyshev formulas.
assert sin(x + y).expand(trig=True) == sin(x)*cos(y) + cos(x)*sin(y)
assert sin(x - y).expand(trig=True) == sin(x)*cos(y) - cos(x)*sin(y)
assert sin(y - x).expand(trig=True) == cos(x)*sin(y) - sin(x)*cos(y)
assert sin(2*x).expand(trig=True) == 2*sin(x)*cos(x)
assert sin(3*x).expand(trig=True) == -4*sin(x)**3 + 3*sin(x)
assert sin(4*x).expand(trig=True) == -8*sin(x)**3*cos(x) + 4*sin(x)*cos(x)
assert sin(2).expand(trig=True) == 2*sin(1)*cos(1)
assert sin(3).expand(trig=True) == -4*sin(1)**3 + 3*sin(1)
def test_trig_symmetry():
assert sin(-x) == -sin(x)
assert cos(-x) == cos(x)
assert tan(-x) == -tan(x)
assert cot(-x) == -cot(x)
assert sin(x + pi) == -sin(x)
assert sin(x + 2*pi) == sin(x)
assert sin(x + 3*pi) == -sin(x)
assert sin(x + 4*pi) == sin(x)
assert sin(x - 5*pi) == -sin(x)
assert cos(x + pi) == -cos(x)
assert cos(x + 2*pi) == cos(x)
assert cos(x + 3*pi) == -cos(x)
assert cos(x + 4*pi) == cos(x)
assert cos(x - 5*pi) == -cos(x)
assert tan(x + pi) == tan(x)
assert tan(x - 3*pi) == tan(x)
assert cot(x + pi) == cot(x)
assert cot(x - 3*pi) == cot(x)
assert sin(pi/2 - x) == cos(x)
assert sin(3*pi/2 - x) == -cos(x)
assert sin(5*pi/2 - x) == cos(x)
assert cos(pi/2 - x) == sin(x)
assert cos(3*pi/2 - x) == -sin(x)
assert cos(5*pi/2 - x) == sin(x)
assert tan(pi/2 - x) == cot(x)
assert tan(3*pi/2 - x) == cot(x)
assert tan(5*pi/2 - x) == cot(x)
assert cot(pi/2 - x) == tan(x)
assert cot(3*pi/2 - x) == tan(x)
assert cot(5*pi/2 - x) == tan(x)
assert sin(pi/2 + x) == cos(x)
assert cos(pi/2 + x) == -sin(x)
assert tan(pi/2 + x) == -cot(x)
assert cot(pi/2 + x) == -tan(x)
def test_cos():
x, y = symbols('x y')
assert cos.nargs == FiniteSet(1)
assert cos(nan) == nan
assert cos(oo*I) == oo
assert cos(-oo*I) == oo
assert cos(0) == 1
assert cos(acos(x)) == x
assert cos(atan(x)) == 1 / sqrt(1 + x**2)
assert cos(asin(x)) == sqrt(1 - x**2)
assert cos(acot(x)) == 1 / sqrt(1 + 1 / x**2)
assert cos(atan2(y, x)) == x / sqrt(x**2 + y**2)
assert cos(pi*I) == cosh(pi)
assert cos(-pi*I) == cosh(pi)
assert cos(-2*I) == cosh(2)
assert cos(pi/2) == 0
assert cos(-pi/2) == 0
assert cos(pi/2) == 0
assert cos(-pi/2) == 0
assert cos((-3*10**73 + 1)*pi/2) == 0
assert cos((7*10**103 + 1)*pi/2) == 0
n = symbols('n', integer=True)
assert cos(pi*n/2) == 0
assert cos(pi) == -1
assert cos(-pi) == -1
assert cos(2*pi) == 1
assert cos(5*pi) == -1
assert cos(8*pi) == 1
assert cos(pi/3) == S.Half
assert cos(-2*pi/3) == -S.Half
assert cos(pi/4) == S.Half*sqrt(2)
assert cos(-pi/4) == S.Half*sqrt(2)
assert cos(11*pi/4) == -S.Half*sqrt(2)
assert cos(-3*pi/4) == -S.Half*sqrt(2)
assert cos(pi/6) == S.Half*sqrt(3)
assert cos(-pi/6) == S.Half*sqrt(3)
assert cos(7*pi/6) == -S.Half*sqrt(3)
assert cos(-5*pi/6) == -S.Half*sqrt(3)
assert cos(1*pi/5) == (sqrt(5) + 1)/4
assert cos(2*pi/5) == (sqrt(5) - 1)/4
assert cos(3*pi/5) == -cos(2*pi/5)
assert cos(4*pi/5) == -cos(1*pi/5)
assert cos(6*pi/5) == -cos(1*pi/5)
assert cos(8*pi/5) == cos(2*pi/5)
assert cos(-1273*pi/5) == -cos(2*pi/5)
assert cos(pi/8) == sqrt((2 + sqrt(2))/4)
assert cos(104*pi/105) == -cos(pi/105)
assert cos(106*pi/105) == -cos(pi/105)
assert cos(-104*pi/105) == -cos(pi/105)
assert cos(-106*pi/105) == -cos(pi/105)
assert cos(x*I) == cosh(x)
assert cos(k*pi*I) == cosh(k*pi)
assert cos(r).is_real is True
assert cos(k*pi) == (-1)**k
assert cos(2*k*pi) == 1
for d in list(range(1, 22)) + [60, 85]:
for n in xrange(0, 2*d + 1):
x = n*pi/d
e = abs( float(cos(x)) - cos(float(x)) )
assert e < 1e-12
def test_issue_6190():
c = Float('123456789012345678901234567890.25', '')
for cls in [sin, cos, tan, cot]:
assert cls(c*pi) == cls(pi/4)
assert cls(4.125*pi) == cls(pi/8)
assert cls(4.7*pi) == cls((4.7 % 2)*pi)
def test_cos_series():
assert cos(x).series(x, 0, 9) == \
1 - x**2/2 + x**4/24 - x**6/720 + x**8/40320 + O(x**9)
def test_cos_rewrite():
assert cos(x).rewrite(exp) == exp(I*x)/2 + exp(-I*x)/2
assert cos(x).rewrite(tan) == (1 - tan(x/2)**2)/(1 + tan(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert cos(sinh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sinh(3)).n()
assert cos(cosh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cosh(3)).n()
assert cos(tanh(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tanh(3)).n()
assert cos(coth(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, coth(3)).n()
assert cos(sin(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, sin(3)).n()
assert cos(cos(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cos(3)).n()
assert cos(tan(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, tan(3)).n()
assert cos(cot(x)).rewrite(
exp).subs(x, 3).n() == cos(x).rewrite(exp).subs(x, cot(3)).n()
assert cos(log(x)).rewrite(Pow) == x**I/2 + x**-I/2
assert cos(x).rewrite(sec) == 1/sec(x)
def test_cos_expansion():
assert cos(x + y).expand(trig=True) == cos(x)*cos(y) - sin(x)*sin(y)
assert cos(x - y).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(y - x).expand(trig=True) == cos(x)*cos(y) + sin(x)*sin(y)
assert cos(2*x).expand(trig=True) == 2*cos(x)**2 - 1
assert cos(3*x).expand(trig=True) == 4*cos(x)**3 - 3*cos(x)
assert cos(4*x).expand(trig=True) == 8*cos(x)**4 - 8*cos(x)**2 + 1
assert cos(2).expand(trig=True) == 2*cos(1)**2 - 1
assert cos(3).expand(trig=True) == 4*cos(1)**3 - 3*cos(1)
def test_tan():
assert tan(nan) == nan
assert tan.nargs == FiniteSet(1)
assert tan(oo*I) == I
assert tan(-oo*I) == -I
assert tan(0) == 0
assert tan(atan(x)) == x
assert tan(asin(x)) == x / sqrt(1 - x**2)
assert tan(acos(x)) == sqrt(1 - x**2) / x
assert tan(acot(x)) == 1 / x
assert tan(atan2(y, x)) == y/x
assert tan(pi*I) == tanh(pi)*I
assert tan(-pi*I) == -tanh(pi)*I
assert tan(-2*I) == -tanh(2)*I
assert tan(pi) == 0
assert tan(-pi) == 0
assert tan(2*pi) == 0
assert tan(-2*pi) == 0
assert tan(-3*10**73*pi) == 0
assert tan(pi/2) == zoo
assert tan(3*pi/2) == zoo
assert tan(pi/3) == sqrt(3)
assert tan(-2*pi/3) == sqrt(3)
assert tan(pi/4) == S.One
assert tan(-pi/4) == -S.One
assert tan(17*pi/4) == S.One
assert tan(-3*pi/4) == S.One
assert tan(pi/6) == 1/sqrt(3)
assert tan(-pi/6) == -1/sqrt(3)
assert tan(7*pi/6) == 1/sqrt(3)
assert tan(-5*pi/6) == 1/sqrt(3)
assert tan(x*I) == tanh(x)*I
assert tan(k*pi) == 0
assert tan(17*k*pi) == 0
assert tan(k*pi*I) == tanh(k*pi)*I
assert tan(r).is_real is True
assert tan(10*pi/7) == tan(3*pi/7)
assert tan(11*pi/7) == -tan(3*pi/7)
assert tan(-11*pi/7) == tan(3*pi/7)
def test_tan_series():
assert tan(x).series(x, 0, 9) == \
x + x**3/3 + 2*x**5/15 + 17*x**7/315 + O(x**9)
def test_tan_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert tan(x).rewrite(exp) == I*(neg_exp - pos_exp)/(neg_exp + pos_exp)
assert tan(x).rewrite(sin) == 2*sin(x)**2/sin(2*x)
assert tan(x).rewrite(cos) == -cos(x + S.Pi/2)/cos(x)
assert tan(x).rewrite(cot) == 1/cot(x)
assert tan(sinh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sinh(3)).n()
assert tan(cosh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cosh(3)).n()
assert tan(tanh(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tanh(3)).n()
assert tan(coth(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, coth(3)).n()
assert tan(sin(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, sin(3)).n()
assert tan(cos(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cos(3)).n()
assert tan(tan(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, tan(3)).n()
assert tan(cot(x)).rewrite(
exp).subs(x, 3).n() == tan(x).rewrite(exp).subs(x, cot(3)).n()
assert tan(log(x)).rewrite(Pow) == I*(x**-I - x**I)/(x**-I + x**I)
assert 0 == (cos(pi/15)*tan(pi/15) - sin(pi/15)).rewrite(pow)
assert tan(pi/19).rewrite(pow) == tan(pi/19)
assert tan(8*pi/19).rewrite(sqrt) == tan(8*pi/19)
def test_tan_subs():
assert tan(x).subs(tan(x), y) == y
assert tan(x).subs(x, y) == tan(y)
assert tan(x).subs(x, S.Pi/2) == zoo
assert tan(x).subs(x, 3*S.Pi/2) == zoo
def test_tan_expansion():
assert tan(x + y).expand(trig=True) == ((tan(x) + tan(y))/(1 - tan(x)*tan(y))).expand()
assert tan(x - y).expand(trig=True) == ((tan(x) - tan(y))/(1 + tan(x)*tan(y))).expand()
assert tan(x + y + z).expand(trig=True) == (
(tan(x) + tan(y) + tan(z) - tan(x)*tan(y)*tan(z))/
(1 - tan(x)*tan(y) - tan(x)*tan(z) - tan(y)*tan(z))).expand()
assert 0 == tan(2*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 7))])*24 - 7
assert 0 == tan(3*x).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*55 - 37
assert 0 == tan(4*x - pi/4).expand(trig=True).rewrite(tan).subs([(tan(x), Rational(1, 5))])*239 - 1
def test_cot():
assert cot(nan) == nan
assert cot.nargs == FiniteSet(1)
assert cot(oo*I) == -I
assert cot(-oo*I) == I
assert cot(0) == zoo
assert cot(2*pi) == zoo
assert cot(acot(x)) == x
assert cot(atan(x)) == 1 / x
assert cot(asin(x)) == sqrt(1 - x**2) / x
assert cot(acos(x)) == x / sqrt(1 - x**2)
assert cot(atan2(y, x)) == x/y
assert cot(pi*I) == -coth(pi)*I
assert cot(-pi*I) == coth(pi)*I
assert cot(-2*I) == coth(2)*I
assert cot(pi) == cot(2*pi) == cot(3*pi)
assert cot(-pi) == cot(-2*pi) == cot(-3*pi)
assert cot(pi/2) == 0
assert cot(-pi/2) == 0
assert cot(5*pi/2) == 0
assert cot(7*pi/2) == 0
assert cot(pi/3) == 1/sqrt(3)
assert cot(-2*pi/3) == 1/sqrt(3)
assert cot(pi/4) == S.One
assert cot(-pi/4) == -S.One
assert cot(17*pi/4) == S.One
assert cot(-3*pi/4) == S.One
assert cot(pi/6) == sqrt(3)
assert cot(-pi/6) == -sqrt(3)
assert cot(7*pi/6) == sqrt(3)
assert cot(-5*pi/6) == sqrt(3)
assert cot(x*I) == -coth(x)*I
assert cot(k*pi*I) == -coth(k*pi)*I
assert cot(r).is_real is True
assert cot(10*pi/7) == cot(3*pi/7)
assert cot(11*pi/7) == -cot(3*pi/7)
assert cot(-11*pi/7) == cot(3*pi/7)
assert cot(x).is_bounded is None
assert cot(r).is_bounded is None
i = Symbol('i', imaginary=True)
assert cot(i).is_bounded is True
assert cot(x).subs(x, 3*pi) == zoo
def test_cot_series():
assert cot(x).series(x, 0, 9) == \
1/x - x/3 - x**3/45 - 2*x**5/945 - x**7/4725 + O(x**9)
# issue 6210
assert cot(x**4 + x**5).series(x, 0, 1) == \
x**(-4) - 1/x**3 + x**(-2) - 1/x + 1 + O(x)
def test_cot_rewrite():
neg_exp, pos_exp = exp(-x*I), exp(x*I)
assert cot(x).rewrite(exp) == I*(pos_exp + neg_exp)/(pos_exp - neg_exp)
assert cot(x).rewrite(sin) == 2*sin(2*x)/sin(x)**2
assert cot(x).rewrite(cos) == -cos(x)/cos(x + S.Pi/2)
assert cot(x).rewrite(tan) == 1/tan(x)
assert cot(sinh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sinh(3)).n()
assert cot(cosh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, cosh(3)).n()
assert cot(tanh(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tanh(3)).n()
assert cot(coth(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, coth(3)).n()
assert cot(sin(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, sin(3)).n()
assert cot(tan(x)).rewrite(
exp).subs(x, 3).n() == cot(x).rewrite(exp).subs(x, tan(3)).n()
assert cot(log(x)).rewrite(Pow) == -I*(x**-I + x**I)/(x**-I - x**I)
assert cot(4*pi/15).rewrite(pow) == (cos(4*pi/15)/sin(4*pi/15)).rewrite(pow)
assert cot(pi/19).rewrite(pow) == cot(pi/19)
assert cot(pi/19).rewrite(sqrt) == cot(pi/19)
def test_cot_subs():
assert cot(x).subs(cot(x), y) == y
assert cot(x).subs(x, y) == cot(y)
assert cot(x).subs(x, 0) == zoo
assert cot(x).subs(x, S.Pi) == zoo
def test_cot_expansion():
assert cot(x + y).expand(trig=True) == ((cot(x)*cot(y) - 1)/(cot(x) + cot(y))).expand()
assert cot(x - y).expand(trig=True) == (-(cot(x)*cot(y) + 1)/(cot(x) - cot(y))).expand()
assert cot(x + y + z).expand(trig=True) == (
(cot(x)*cot(y)*cot(z) - cot(x) - cot(y) - cot(z))/
(-1 + cot(x)*cot(y) + cot(x)*cot(z) + cot(y)*cot(z))).expand()
assert cot(3*x).expand(trig=True) == ((cot(x)**3 - 3*cot(x))/(3*cot(x)**2 - 1)).expand()
assert 0 == cot(2*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 3))])*3 + 4
assert 0 == cot(3*x).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 5))])*55 - 37
assert 0 == cot(4*x - pi/4).expand(trig=True).rewrite(cot).subs([(cot(x), Rational(1, 7))])*863 + 191
def test_asin():
assert asin(nan) == nan
assert asin.nargs == FiniteSet(1)
assert asin(oo) == -I*oo
assert asin(-oo) == I*oo
# Note: asin(-x) = - asin(x)
assert asin(0) == 0
assert asin(1) == pi/2
assert asin(-1) == -pi/2
assert asin(sqrt(3)/2) == pi/3
assert asin(-sqrt(3)/2) == -pi/3
assert asin(sqrt(2)/2) == pi/4
assert asin(-sqrt(2)/2) == -pi/4
assert asin(sqrt((5 - sqrt(5))/8)) == pi/5
assert asin(-sqrt((5 - sqrt(5))/8)) == -pi/5
assert asin(Rational(1, 2)) == pi/6
assert asin(-Rational(1, 2)) == -pi/6
assert asin((sqrt(2 - sqrt(2)))/2) == pi/8
assert asin(-(sqrt(2 - sqrt(2)))/2) == -pi/8
assert asin((sqrt(5) - 1)/4) == pi/10
assert asin(-(sqrt(5) - 1)/4) == -pi/10
assert asin((sqrt(3) - 1)/sqrt(2**3)) == pi/12
assert asin(-(sqrt(3) - 1)/sqrt(2**3)) == -pi/12
assert asin(x).diff(x) == 1/sqrt(1 - x**2)
assert asin(0.2).is_real is True
assert asin(-2).is_real is False
assert asin(r).is_real is None
assert asin(-2*I) == -I*asinh(2)
assert asin(Rational(1, 7), evaluate=False).is_positive is True
assert asin(Rational(-1, 7), evaluate=False).is_positive is False
assert asin(p).is_positive is None
def test_asin_series():
assert asin(x).series(x, 0, 9) == \
x + x**3/6 + 3*x**5/40 + 5*x**7/112 + O(x**9)
t5 = asin(x).taylor_term(5, x)
assert t5 == 3*x**5/40
assert asin(x).taylor_term(7, x, t5, 0) == 5*x**7/112
def test_asin_rewrite():
assert asin(x).rewrite(log) == -I*log(I*x + sqrt(1 - x**2))
assert asin(x).rewrite(atan) == 2*atan(x/(1 + sqrt(1 - x**2)))
assert asin(x).rewrite(acos) == S.Pi/2 - acos(x)
assert asin(x).rewrite(acot) == 2*acot((sqrt(-x**2 + 1) + 1)/x)
assert asin(x).rewrite(asec) == -asec(1/x) + pi/2
assert asin(x).rewrite(acsc) == acsc(1/x)
def test_acos():
assert acos(nan) == nan
assert acos.nargs == FiniteSet(1)
assert acos(oo) == I*oo
assert acos(-oo) == -I*oo
# Note: acos(-x) = pi - acos(x)
assert acos(0) == pi/2
assert acos(Rational(1, 2)) == pi/3
assert acos(-Rational(1, 2)) == (2*pi)/3
assert acos(1) == 0
assert acos(-1) == pi
assert acos(sqrt(2)/2) == pi/4
assert acos(-sqrt(2)/2) == (3*pi)/4
assert acos(x).diff(x) == -1/sqrt(1 - x**2)
assert acos(0.2).is_real is True
assert acos(-2).is_real is False
assert acos(r).is_real is None
assert acos(Rational(1, 7), evaluate=False).is_positive is True
assert acos(Rational(-1, 7), evaluate=False).is_positive is True
assert acos(Rational(3, 2), evaluate=False).is_positive is False
assert acos(p).is_positive is None
assert acos(2 + p).conjugate() != acos(10 + p)
assert acos(-3 + n).conjugate() != acos(-3 + n)
assert acos(S.One/3).conjugate() == acos(S.One/3)
assert acos(-S.One/3).conjugate() == acos(-S.One/3)
assert acos(p + n*I).conjugate() == acos(p - n*I)
assert acos(z).conjugate() != acos(conjugate(z))
def test_acos_series():
assert acos(x).series(x, 0, 8) == \
pi/2 - x - x**3/6 - 3*x**5/40 - 5*x**7/112 + O(x**8)
assert acos(x).series(x, 0, 8) == pi/2 - asin(x).series(x, 0, 8)
t5 = acos(x).taylor_term(5, x)
assert t5 == -3*x**5/40
assert acos(x).taylor_term(7, x, t5, 0) == -5*x**7/112
def test_acos_rewrite():
assert acos(x).rewrite(log) == pi/2 + I*log(I*x + sqrt(1 - x**2))
assert acos(x).rewrite(atan) == \
atan(sqrt(1 - x**2)/x) + (pi/2)*(1 - x*sqrt(1/x**2))
assert acos(0).rewrite(atan) == S.Pi/2
assert acos(0.5).rewrite(atan) == acos(0.5).rewrite(log)
assert acos(x).rewrite(asin) == S.Pi/2 - asin(x)
assert acos(x).rewrite(acot) == -2*acot((sqrt(-x**2 + 1) + 1)/x) + pi/2
assert acos(x).rewrite(asec) == asec(1/x)
assert acos(x).rewrite(acsc) == -acsc(1/x) + pi/2
def test_atan():
assert atan(nan) == nan
assert atan.nargs == FiniteSet(1)
assert atan(oo) == pi/2
assert atan(-oo) == -pi/2
assert atan(0) == 0
assert atan(1) == pi/4
assert atan(sqrt(3)) == pi/3
assert atan(oo) == pi/2
assert atan(x).diff(x) == 1/(1 + x**2)
assert atan(r).is_real is True
assert atan(-2*I) == -I*atanh(2)
assert atan(p).is_positive is True
assert atan(n).is_positive is False
assert atan(x).is_positive is None
def test_atan_rewrite():
assert atan(x).rewrite(log) == I*log((1 - I*x)/(1 + I*x))/2
assert atan(x).rewrite(asin) == (-asin(1/sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
assert atan(x).rewrite(acos) == sqrt(x**2)*acos(1/sqrt(x**2 + 1))/x
assert atan(x).rewrite(acot) == acot(1/x)
assert atan(x).rewrite(asec) == sqrt(x**2)*asec(sqrt(x**2 + 1))/x
assert atan(x).rewrite(acsc) == (-acsc(sqrt(x**2 + 1)) + pi/2)*sqrt(x**2)/x
def test_atan2():
assert atan2.nargs == FiniteSet(2)
assert atan2(0, 0) == S.NaN
assert atan2(0, 1) == 0
assert atan2(1, 1) == pi/4
assert atan2(1, 0) == pi/2
assert atan2(1, -1) == 3*pi/4
assert atan2(0, -1) == pi
assert atan2(-1, -1) == -3*pi/4
assert atan2(-1, 0) == -pi/2
assert atan2(-1, 1) == -pi/4
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
eq = atan2(r, i)
ans = -I*log((i + I*r)/sqrt(i**2 + r**2))
reps = ((r, 2), (i, I))
assert eq.subs(reps) == ans.subs(reps)
u = Symbol("u", positive=True)
assert atan2(0, u) == 0
u = Symbol("u", negative=True)
assert atan2(0, u) == pi
assert atan2(y, oo) == 0
assert atan2(y, -oo)== 2*pi*Heaviside(re(y)) - pi
assert atan2(y, x).rewrite(log) == -I*log((x + I*y)/sqrt(x**2 + y**2))
assert atan2(y, x).rewrite(atan) == 2*atan(y/(x + sqrt(x**2 + y**2)))
ex = atan2(y, x) - arg(x + I*y)
assert ex.subs({x:2, y:3}).rewrite(arg) == 0
assert ex.subs({x:2, y:3*I}).rewrite(arg) == -pi - I*log(sqrt(5)*I/5)
assert ex.subs({x:2*I, y:3}).rewrite(arg) == -pi/2 - I*log(sqrt(5)*I)
assert ex.subs({x:2*I, y:3*I}).rewrite(arg) == -pi + atan(2/S(3)) + atan(3/S(2))
i = symbols('i', imaginary=True)
r = symbols('r', real=True)
e = atan2(i, r)
rewrite = e.rewrite(arg)
reps = {i: I, r: -2}
assert rewrite == -I*log(abs(I*i + r)/sqrt(abs(i**2 + r**2))) + arg((I*i + r)/sqrt(i**2 + r**2))
assert (e - rewrite).subs(reps).equals(0)
assert conjugate(atan2(x, y)) == atan2(conjugate(x), conjugate(y))
assert diff(atan2(y, x), x) == -y/(x**2 + y**2)
assert diff(atan2(y, x), y) == x/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), x)) == -y/(x**2 + y**2)
assert simplify(diff(atan2(y, x).rewrite(log), y)) == x/(x**2 + y**2)
def test_acot():
assert acot(nan) == nan
assert acot.nargs == FiniteSet(1)
assert acot(-oo) == 0
assert acot(oo) == 0
assert acot(1) == pi/4
assert acot(0) == pi/2
assert acot(sqrt(3)/3) == pi/3
assert acot(1/sqrt(3)) == pi/3
assert acot(-1/sqrt(3)) == -pi/3
assert acot(x).diff(x) == -1/(1 + x**2)
assert acot(r).is_real is True
assert acot(I*pi) == -I*acoth(pi)
assert acot(-2*I) == I*acoth(2)
assert acot(x).is_positive is None
assert acot(r).is_positive is True
assert acot(p).is_positive is True
assert acot(I).is_positive is False
def test_acot_rewrite():
assert acot(x).rewrite(log) == I*log((x - I)/(x + I))/2
assert acot(x).rewrite(asin) == x*(-asin(sqrt(-x**2)/sqrt(-x**2 - 1)) + pi/2)*sqrt(x**(-2))
assert acot(x).rewrite(acos) == x*sqrt(x**(-2))*acos(sqrt(-x**2)/sqrt(-x**2 - 1))
assert acot(x).rewrite(atan) == atan(1/x)
assert acot(x).rewrite(asec) == x*sqrt(x**(-2))*asec(sqrt((x**2 + 1)/x**2))
assert acot(x).rewrite(acsc) == x*(-acsc(sqrt((x**2 + 1)/x**2)) + pi/2)*sqrt(x**(-2))
def test_attributes():
assert sin(x).args == (x,)
def test_sincos_rewrite():
assert sin(pi/2 - x) == cos(x)
assert sin(pi - x) == sin(x)
assert cos(pi/2 - x) == sin(x)
assert cos(pi - x) == -cos(x)
def _check_even_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> f(x)
arg : -x
"""
return func(arg).args[0] == -arg
def _check_odd_rewrite(func, arg):
"""Checks that the expr has been rewritten using f(-x) -> -f(x)
arg : -x
"""
return func(arg).func.is_Mul
def _check_no_rewrite(func, arg):
"""Checks that the expr is not rewritten"""
return func(arg).args[0] == arg
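# Illustrative examples (not part of the original test suite):
# cos(-x) automatically evaluates to cos(x), an "even" rewrite, so
# _check_even_rewrite(cos, -x) is True; sin(-x) evaluates to -sin(x), an
# "odd" rewrite (a Mul), so _check_odd_rewrite(sin, -x) is True.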
def test_evenodd_rewrite():
a = cos(2) # negative
b = sin(1) # positive
even = [cos]
odd = [sin, tan, cot, asin, atan, acot]
with_minus = [-1, -2**1024 * E, -pi/105, -x*y, -x - y]
for func in even:
for expr in with_minus:
assert _check_even_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == func(y - x) # it doesn't matter which form is canonical
for func in odd:
for expr in with_minus:
assert _check_odd_rewrite(func, expr)
assert _check_no_rewrite(func, a*b)
assert func(
x - y) == -func(y - x) # it doesn't matter which form is canonical
def test_issue_4547():
assert sin(x).rewrite(cot) == 2*cot(x/2)/(1 + cot(x/2)**2)
assert cos(x).rewrite(cot) == -(1 - cot(x/2)**2)/(1 + cot(x/2)**2)
assert tan(x).rewrite(cot) == 1/cot(x)
assert cot(x).fdiff() == -1 - cot(x)**2
def test_as_leading_term_issue_5272():
assert sin(x).as_leading_term(x) == x
assert cos(x).as_leading_term(x) == 1
assert tan(x).as_leading_term(x) == x
assert cot(x).as_leading_term(x) == 1/x
assert asin(x).as_leading_term(x) == x
assert acos(x).as_leading_term(x) == x
assert atan(x).as_leading_term(x) == x
assert acot(x).as_leading_term(x) == x
def test_leading_terms():
for func in [sin, cos, tan, cot, asin, acos, atan, acot]:
for arg in (1/x, S.Half):
eq = func(arg)
assert eq.as_leading_term(x) == eq
def test_atan2_expansion():
assert cancel(atan2(x**2, x + 1).diff(x) - atan(x**2/(x + 1)).diff(x)) == 0
assert cancel(atan(y/x).series(y, 0, 5) - atan2(y, x).series(y, 0, 5)
+ atan2(0, x) - atan(0)) == O(y**5)
assert cancel(atan(y/x).series(x, 1, 4) - atan2(y, x).series(x, 1, 4)
+ atan2(y, 1) - atan(y)) == O((x - 1)**4, (x, 1))
assert cancel(atan((y + x)/x).series(x, 1, 3) - atan2(y + x, x).series(x, 1, 3)
+ atan2(1 + y, 1) - atan(1 + y)) == O((x - 1)**3, (x, 1))
assert Matrix([atan2(y, x)]).jacobian([y, x]) == \
Matrix([[x/(y**2 + x**2), -y/(y**2 + x**2)]])
def test_aseries():
def t(n, v, d, e):
assert abs(
n(1/v).evalf() - n(1/x).series(x, dir=d).removeO().subs(x, v)) < e
t(atan, 0.1, '+', 1e-5)
t(atan, -0.1, '-', 1e-5)
t(acot, 0.1, '+', 1e-5)
t(acot, -0.1, '-', 1e-5)
def test_issue_4420():
i = Symbol('i', integer=True)
e = Symbol('e', even=True)
o = Symbol('o', odd=True)
# unknown parity for variable
assert cos(4*i*pi) == 1
assert sin(4*i*pi) == 0
assert tan(4*i*pi) == 0
assert cot(4*i*pi) == zoo
assert cos(3*i*pi) == cos(pi*i) # +/-1
assert sin(3*i*pi) == 0
assert tan(3*i*pi) == 0
assert cot(3*i*pi) == zoo
assert cos(4.0*i*pi) == 1
assert sin(4.0*i*pi) == 0
assert tan(4.0*i*pi) == 0
assert cot(4.0*i*pi) == zoo
assert cos(3.0*i*pi) == cos(pi*i) # +/-1
assert sin(3.0*i*pi) == 0
assert tan(3.0*i*pi) == 0
assert cot(3.0*i*pi) == zoo
assert cos(4.5*i*pi) == cos(0.5*pi*i)
assert sin(4.5*i*pi) == sin(0.5*pi*i)
assert tan(4.5*i*pi) == tan(0.5*pi*i)
assert cot(4.5*i*pi) == cot(0.5*pi*i)
# parity of variable is known
assert cos(4*e*pi) == 1
assert sin(4*e*pi) == 0
assert tan(4*e*pi) == 0
assert cot(4*e*pi) == zoo
assert cos(3*e*pi) == 1
assert sin(3*e*pi) == 0
assert tan(3*e*pi) == 0
assert cot(3*e*pi) == zoo
assert cos(4.0*e*pi) == 1
assert sin(4.0*e*pi) == 0
assert tan(4.0*e*pi) == 0
assert cot(4.0*e*pi) == zoo
assert cos(3.0*e*pi) == 1
assert sin(3.0*e*pi) == 0
assert tan(3.0*e*pi) == 0
assert cot(3.0*e*pi) == zoo
assert cos(4.5*e*pi) == cos(0.5*pi*e)
assert sin(4.5*e*pi) == sin(0.5*pi*e)
assert tan(4.5*e*pi) == tan(0.5*pi*e)
assert cot(4.5*e*pi) == cot(0.5*pi*e)
assert cos(4*o*pi) == 1
assert sin(4*o*pi) == 0
assert tan(4*o*pi) == 0
assert cot(4*o*pi) == zoo
assert cos(3*o*pi) == -1
assert sin(3*o*pi) == 0
assert tan(3*o*pi) == 0
assert cot(3*o*pi) == zoo
assert cos(4.0*o*pi) == 1
assert sin(4.0*o*pi) == 0
assert tan(4.0*o*pi) == 0
assert cot(4.0*o*pi) == zoo
assert cos(3.0*o*pi) == -1
assert sin(3.0*o*pi) == 0
assert tan(3.0*o*pi) == 0
assert cot(3.0*o*pi) == zoo
assert cos(4.5*o*pi) == cos(0.5*pi*o)
assert sin(4.5*o*pi) == sin(0.5*pi*o)
assert tan(4.5*o*pi) == tan(0.5*pi*o)
assert cot(4.5*o*pi) == cot(0.5*pi*o)
# x could be imaginary
assert cos(4*x*pi) == cos(4*pi*x)
assert sin(4*x*pi) == sin(4*pi*x)
assert tan(4*x*pi) == tan(4*pi*x)
assert cot(4*x*pi) == cot(4*pi*x)
assert cos(3*x*pi) == cos(3*pi*x)
assert sin(3*x*pi) == sin(3*pi*x)
assert tan(3*x*pi) == tan(3*pi*x)
assert cot(3*x*pi) == cot(3*pi*x)
assert cos(4.0*x*pi) == cos(4.0*pi*x)
assert sin(4.0*x*pi) == sin(4.0*pi*x)
assert tan(4.0*x*pi) == tan(4.0*pi*x)
assert cot(4.0*x*pi) == cot(4.0*pi*x)
assert cos(3.0*x*pi) == cos(3.0*pi*x)
assert sin(3.0*x*pi) == sin(3.0*pi*x)
assert tan(3.0*x*pi) == tan(3.0*pi*x)
assert cot(3.0*x*pi) == cot(3.0*pi*x)
assert cos(4.5*x*pi) == cos(4.5*pi*x)
assert sin(4.5*x*pi) == sin(4.5*pi*x)
assert tan(4.5*x*pi) == tan(4.5*pi*x)
assert cot(4.5*x*pi) == cot(4.5*pi*x)
def test_inverses():
raises(AttributeError, lambda: sin(x).inverse())
raises(AttributeError, lambda: cos(x).inverse())
assert tan(x).inverse() == atan
assert cot(x).inverse() == acot
raises(AttributeError, lambda: csc(x).inverse())
raises(AttributeError, lambda: sec(x).inverse())
assert asin(x).inverse() == sin
assert acos(x).inverse() == cos
assert atan(x).inverse() == tan
assert acot(x).inverse() == cot
def test_real_imag():
a, b = symbols('a b', real=True)
z = a + b*I
for deep in [True, False]:
assert sin(
z).as_real_imag(deep=deep) == (sin(a)*cosh(b), cos(a)*sinh(b))
assert cos(
z).as_real_imag(deep=deep) == (cos(a)*cosh(b), -sin(a)*sinh(b))
assert tan(z).as_real_imag(deep=deep) == (sin(2*a)/(cos(2*a) +
cosh(2*b)), sinh(2*b)/(cos(2*a) + cosh(2*b)))
assert cot(z).as_real_imag(deep=deep) == (-sin(2*a)/(cos(2*a) -
cosh(2*b)), -sinh(2*b)/(cos(2*a) - cosh(2*b)))
assert sin(a).as_real_imag(deep=deep) == (sin(a), 0)
assert cos(a).as_real_imag(deep=deep) == (cos(a), 0)
assert tan(a).as_real_imag(deep=deep) == (tan(a), 0)
assert cot(a).as_real_imag(deep=deep) == (cot(a), 0)
@XFAIL
def test_sin_cos_with_infinity():
# Test for issue 5196
# https://github.com/sympy/sympy/issues/5196
assert sin(oo) == S.NaN
assert cos(oo) == S.NaN
@slow
def test_sincos_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
for i in xrange(1, (n + 1)//2 + 1):
if 1 == gcd(i, n):
x = i*pi/n
s1 = sin(x).rewrite(sqrt)
c1 = cos(x).rewrite(sqrt)
assert not s1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert not c1.has(cos, sin), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(sin(x.evalf(5)) - s1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs(cos(x.evalf(5)) - c1.evalf(2)), "fails for %d*pi/%d" % (i, n)
assert cos(pi/14).rewrite(sqrt) == sqrt(cos(pi/7)/2 + S.Half)
@slow
def test_tancot_rewrite_sqrt():
# equivalent to testing rewrite(pow)
for p in [1, 3, 5, 17]:
for t in [1, 8]:
n = t*p
for i in xrange(1, (n + 1)//2 + 1):
if 1 == gcd(i, n):
x = i*pi/n
if 2*i != n and 3*i != 2*n:
t1 = tan(x).rewrite(sqrt)
assert not t1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( tan(x.evalf(7)) - t1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
if i != 0 and i != n:
c1 = cot(x).rewrite(sqrt)
assert not c1.has(cot, tan), "fails for %d*pi/%d" % (i, n)
assert 1e-3 > abs( cot(x.evalf(7)) - c1.evalf(4) ), "fails for %d*pi/%d" % (i, n)
def test_sec():
x = symbols('x', real=True)
z = symbols('z')
assert sec.nargs == FiniteSet(1)
assert sec(0) == 1
assert sec(pi) == -1
assert sec(pi/2) == zoo
assert sec(-pi/2) == zoo
assert sec(pi/6) == 2*sqrt(3)/3
assert sec(pi/3) == 2
assert sec(5*pi/2) == zoo
assert sec(9*pi/7) == -sec(2*pi/7)
assert sec(I) == 1/cosh(1)
assert sec(x*I) == 1/cosh(x)
assert sec(-x) == sec(x)
assert sec(asec(x)) == x
assert sec(x).rewrite(exp) == 1/(exp(I*x)/2 + exp(-I*x)/2)
assert sec(x).rewrite(sin) == sec(x)
assert sec(x).rewrite(cos) == 1/cos(x)
assert sec(x).rewrite(tan) == (tan(x/2)**2 + 1)/(-tan(x/2)**2 + 1)
assert sec(x).rewrite(pow) == sec(x)
assert sec(x).rewrite(sqrt) == sec(x)
assert sec(z).rewrite(cot) == (cot(z/2)**2 + 1)/(cot(z/2)**2 - 1)
assert sec(z).conjugate() == sec(conjugate(z))
assert (sec(z).as_real_imag() ==
(cos(re(z))*cosh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2),
sin(re(z))*sinh(im(z))/(sin(re(z))**2*sinh(im(z))**2 +
cos(re(z))**2*cosh(im(z))**2)))
assert sec(x).expand(trig=True) == 1/cos(x)
assert sec(2*x).expand(trig=True) == 1/(2*cos(x)**2 - 1)
assert sec(x).is_real == True
assert sec(z).is_real == None
assert sec(x).as_leading_term() == sec(x)
assert sec(0).is_bounded == True
assert sec(x).is_bounded == None
assert sec(pi/2).is_bounded == False
assert series(sec(x), x, x0=0, n=6) == 1 + x**2/2 + 5*x**4/24 + O(x**6)
# https://github.com/sympy/sympy/issues/7166
assert series(sqrt(sec(x))) == 1 + x**2/4 + 7*x**4/96 + O(x**6)
# https://github.com/sympy/sympy/issues/7167
assert (series(sqrt(sec(x)), x, x0=pi*3/2, n=4) ==
1/sqrt(x - 3*pi/2) + (x - 3*pi/2)**(S(3)/2)/12 +
(x - 3*pi/2)**(S(7)/2)/160 + O((x - 3*pi/2)**4, (x, 3*pi/2)))
assert sec(x).diff(x) == tan(x)*sec(x)
# Taylor Term checks
assert sec(z).taylor_term(4, z) == 5*z**4/24
assert sec(z).taylor_term(6, z) == 61*z**6/720
assert sec(z).taylor_term(5, z) == 0
def test_csc():
x = symbols('x', real=True)
z = symbols('z')
# https://github.com/sympy/sympy/issues/6707
cosecant = csc('x')
alternate = 1/sin('x')
assert cosecant.equals(alternate) == True
assert alternate.equals(cosecant) == True
assert csc.nargs == FiniteSet(1)
assert csc(0) == zoo
assert csc(pi) == zoo
assert csc(pi/2) == 1
assert csc(-pi/2) == -1
assert csc(pi/6) == 2
assert csc(pi/3) == 2*sqrt(3)/3
assert csc(5*pi/2) == 1
assert csc(9*pi/7) == -csc(2*pi/7)
assert csc(I) == -I/sinh(1)
assert csc(x*I) == -I/sinh(x)
assert csc(-x) == -csc(x)
assert csc(acsc(x)) == x
assert csc(x).rewrite(exp) == 2*I/(exp(I*x) - exp(-I*x))
assert csc(x).rewrite(sin) == 1/sin(x)
assert csc(x).rewrite(cos) == csc(x)
assert csc(x).rewrite(tan) == (tan(x/2)**2 + 1)/(2*tan(x/2))
assert csc(x).rewrite(cot) == (cot(x/2)**2 + 1)/(2*cot(x/2))
assert csc(z).conjugate() == csc(conjugate(z))
assert (csc(z).as_real_imag() ==
(sin(re(z))*cosh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2),
-cos(re(z))*sinh(im(z))/(sin(re(z))**2*cosh(im(z))**2 +
cos(re(z))**2*sinh(im(z))**2)))
assert csc(x).expand(trig=True) == 1/sin(x)
assert csc(2*x).expand(trig=True) == 1/(2*sin(x)*cos(x))
assert csc(x).is_real == True
assert csc(z).is_real == None
assert csc(x).as_leading_term() == csc(x)
assert csc(0).is_bounded == False
assert csc(x).is_bounded == None
assert csc(pi/2).is_bounded == True
assert series(csc(x), x, x0=pi/2, n=6) == \
1 + (x - pi/2)**2/2 + 5*(x - pi/2)**4/24 + O((x - pi/2)**6, (x, pi/2))
assert series(csc(x), x, x0=0, n=6) == \
1/x + x/6 + 7*x**3/360 + 31*x**5/15120 + O(x**6)
assert csc(x).diff(x) == -cot(x)*csc(x)
assert csc(x).taylor_term(2, x) == 0
assert csc(x).taylor_term(3, x) == 7*x**3/360
assert csc(x).taylor_term(5, x) == 31*x**5/15120
def test_asec():
assert asec(nan) == nan
assert asec(1) == 0
assert asec(-1) == pi
assert asec(oo) == pi/2
assert asec(-oo) == pi/2
assert asec(zoo) == pi/2
assert asec(x).diff(x) == 1/(x**2*sqrt(1 - 1/x**2))
assert asec(x).as_leading_term(x) == log(x)
assert asec(x).rewrite(log) == I*log(sqrt(1 - 1/x**2) + I/x) + pi/2
assert asec(x).rewrite(asin) == -asin(1/x) + pi/2
assert asec(x).rewrite(acos) == acos(1/x)
assert asec(x).rewrite(atan) == (2*atan(x + sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acot) == (2*acot(x - sqrt(x**2 - 1)) - pi/2)*sqrt(x**2)/x
assert asec(x).rewrite(acsc) == -acsc(x) + pi/2
def test_acsc():
assert acsc(nan) == nan
assert acsc(1) == pi/2
assert acsc(-1) == -pi/2
assert acsc(oo) == 0
assert acsc(-oo) == 0
assert acsc(zoo) == 0
assert acsc(x).diff(x) == -1/(x**2*sqrt(1 - 1/x**2))
assert acsc(x).as_leading_term(x) == log(x)
assert acsc(x).rewrite(log) == -I*log(sqrt(1 - 1/x**2) + I/x)
assert acsc(x).rewrite(asin) == asin(1/x)
assert acsc(x).rewrite(acos) == -acos(1/x) + pi/2
assert acsc(x).rewrite(atan) == (-atan(sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(acot) == (-acot(1/sqrt(x**2 - 1)) + pi/2)*sqrt(x**2)/x
assert acsc(x).rewrite(asec) == -asec(x) + pi/2
@XFAIL
@slow
def test_csc_rewrite_failing():
# Move these 2 tests to test_csc() once bugs fixed
# sin(x).rewrite(pow) raises RuntimeError: maximum recursion depth
# https://github.com/sympy/sympy/issues/7171
assert csc(x).rewrite(pow) == csc(x)
assert csc(x).rewrite(sqrt) == csc(x)
|
|
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be applied if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
class ProxyFix(object):
"""Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
front of the application may set.
- ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
- ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
- ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
``SERVER_PORT``.
- ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
- ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
You must tell the middleware how many proxies set each header so it
knows what values to trust. It is a security issue to trust values
that came from the client rather than a proxy.
The original values of the headers are stored in the WSGI
environ as ``werkzeug.proxy_fix.orig``, a dict.
:param app: The WSGI application to wrap.
:param x_for: Number of values to trust for ``X-Forwarded-For``.
:param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
:param x_host: Number of values to trust for ``X-Forwarded-Host``.
:param x_port: Number of values to trust for ``X-Forwarded-Port``.
:param x_prefix: Number of values to trust for
``X-Forwarded-Prefix``.
:param num_proxies: Deprecated, use ``x_for`` instead.
.. code-block:: python
from werkzeug.middleware.proxy_fix import ProxyFix
# App is behind one proxy that sets the -For and -Host headers.
app = ProxyFix(app, x_for=1, x_host=1)
.. versionchanged:: 0.15
All headers support multiple values. The ``num_proxies``
argument is deprecated. Each header is configured with a
separate number of trusted proxies.
.. versionchanged:: 0.15
Original WSGI environ values are stored in the
``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
and will be removed in 1.0.
.. versionchanged:: 0.15
Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
.. versionchanged:: 0.15
``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
``SERVER_NAME`` and ``SERVER_PORT``.
"""
def __init__(
self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0
):
self.app = app
self.x_for = x_for
self.x_proto = x_proto
self.x_host = x_host
self.x_port = x_port
self.x_prefix = x_prefix
self.num_proxies = num_proxies
@property
def num_proxies(self):
"""The number of proxies setting ``X-Forwarded-For`` in front
of the application.
.. deprecated:: 0.15
A separate number of trusted proxies is configured for each
header. ``num_proxies`` maps to ``x_for``. This method will
be removed in 1.0.
:internal:
"""
warnings.warn(
"'num_proxies' is deprecated as of version 0.15 and will be"
" removed in version 1.0. Use 'x_for' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.x_for
@num_proxies.setter
def num_proxies(self, value):
if value is not None:
warnings.warn(
"'num_proxies' is deprecated as of version 0.15 and"
" will be removed in version 1.0. Use 'x_for' instead.",
DeprecationWarning,
stacklevel=2,
)
self.x_for = value
def get_remote_addr(self, forwarded_for):
"""Get the real ``remote_addr`` by looking backwards ``x_for``
number of values in the ``X-Forwarded-For`` header.
:param forwarded_for: List of values parsed from the
``X-Forwarded-For`` header.
:return: The real ``remote_addr``, or ``None`` if there were not
at least ``x_for`` values.
.. deprecated:: 0.15
This is handled internally for each header. This method will
be removed in 1.0.
.. versionchanged:: 0.9
Use ``num_proxies`` instead of always picking the first
value.
.. versionadded:: 0.8
"""
warnings.warn(
"'get_remote_addr' is deprecated as of version 0.15 and"
" will be removed in version 1.0. It is now handled"
" internally for each header.",
DeprecationWarning,
)
return self._get_trusted_comma(self.x_for, ",".join(forwarded_for))
def _get_trusted_comma(self, trusted, value):
"""Get the real value from a comma-separated header based on the
configured number of trusted proxies.
:param trusted: Number of values to trust in the header.
:param value: Header value to parse.
:return: The real value, or ``None`` if there are fewer values
than the number of trusted proxies.
.. versionadded:: 0.15
"""
if not (trusted and value):
return
values = [x.strip() for x in value.split(",")]
if len(values) >= trusted:
return values[-trusted]
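# Illustrative behaviour of the helper above (not part of the upstream
# module): with one trusted proxy the last comma-separated value wins and
# anything the client prepended is ignored, while too few values yield None:
#   _get_trusted_comma(1, "spoofed, 203.0.113.7")  -> "203.0.113.7"
#   _get_trusted_comma(2, "203.0.113.7")           -> None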
def __call__(self, environ, start_response):
"""Modify the WSGI environ based on the various ``Forwarded``
headers before calling the wrapped application. Store the
original environ values in ``werkzeug.proxy_fix.orig_{key}``.
"""
environ_get = environ.get
orig_remote_addr = environ_get("REMOTE_ADDR")
orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
orig_http_host = environ_get("HTTP_HOST")
environ.update(
{
"werkzeug.proxy_fix.orig": {
"REMOTE_ADDR": orig_remote_addr,
"wsgi.url_scheme": orig_wsgi_url_scheme,
"HTTP_HOST": orig_http_host,
"SERVER_NAME": environ_get("SERVER_NAME"),
"SERVER_PORT": environ_get("SERVER_PORT"),
"SCRIPT_NAME": environ_get("SCRIPT_NAME"),
},
# todo: remove deprecated keys
"werkzeug.proxy_fix.orig_remote_addr": orig_remote_addr,
"werkzeug.proxy_fix.orig_wsgi_url_scheme": orig_wsgi_url_scheme,
"werkzeug.proxy_fix.orig_http_host": orig_http_host,
}
)
x_for = self._get_trusted_comma(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
if x_for:
environ["REMOTE_ADDR"] = x_for
x_proto = self._get_trusted_comma(
self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
)
if x_proto:
environ["wsgi.url_scheme"] = x_proto
x_host = self._get_trusted_comma(
self.x_host, environ_get("HTTP_X_FORWARDED_HOST")
)
if x_host:
environ["HTTP_HOST"] = x_host
parts = x_host.split(":", 1)
environ["SERVER_NAME"] = parts[0]
if len(parts) == 2:
environ["SERVER_PORT"] = parts[1]
x_port = self._get_trusted_comma(
self.x_port, environ_get("HTTP_X_FORWARDED_PORT")
)
if x_port:
host = environ.get("HTTP_HOST")
if host:
parts = host.split(":", 1)
host = parts[0] if len(parts) == 2 else host
environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
environ["SERVER_PORT"] = x_port
x_prefix = self._get_trusted_comma(
self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
)
if x_prefix:
environ["SCRIPT_NAME"] = x_prefix
return self.app(environ, start_response)
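# --- Usage sketch (illustrative only, not part of the upstream module) ---
# A minimal demonstration of wrapping a WSGI callable behind a single proxy
# that sets X-Forwarded-For and X-Forwarded-Proto; the addresses below are
# made-up example values.
if __name__ == "__main__":
    def demo_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        body = "addr=%s scheme=%s" % (
            environ["REMOTE_ADDR"], environ["wsgi.url_scheme"])
        return [body.encode("utf-8")]
    app = ProxyFix(demo_app, x_for=1, x_proto=1)
    environ = {
        "REMOTE_ADDR": "10.0.0.1",  # address of the proxy itself
        "wsgi.url_scheme": "http",
        "HTTP_X_FORWARDED_FOR": "203.0.113.7",
        "HTTP_X_FORWARDED_PROTO": "https",
    }
    # Prints ['addr=203.0.113.7 scheme=https'] because the trusted header
    # values replace REMOTE_ADDR and wsgi.url_scheme.
    print(app(environ, lambda status, headers: None))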
|
|
"""
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asarray, asanyarray, result_type
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorially with the number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
comb_iter = []
for x in range(len(input_sets) - iteration):
for y in range(x + 1, len(input_sets) - iteration):
comb_iter.append((x, y))
for curr in full_results:
cost, positions, remaining = curr
for con in comb_iter:
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Find cost
new_cost = _compute_size_by_dict(idx_contract, idx_dict)
if idx_removed:
new_cost *= 2
# Build (total_cost, positions, indices_remaining)
new_cost += cost
new_pos = positions + [con]
iter_results.append((new_cost, new_pos, new_input_sets))
# Update list to iterate over
full_results = iter_results
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard-like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
if len(input_sets) == 1:
return [(0,)]
path = []
for iteration in range(len(input_sets) - 1):
iteration_results = []
comb_iter = []
# Compute all unique pairs
for x in range(len(input_sets)):
for y in range(x + 1, len(input_sets)):
comb_iter.append((x, y))
for positions in comb_iter:
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
if _compute_size_by_dict(idx_result, idx_dict) > memory_limit:
continue
# Build sort tuple
removed_size = _compute_size_by_dict(idx_removed, idx_dict)
cost = _compute_size_by_dict(idx_contract, idx_dict)
sort = (-removed_size, cost)
# Add contraction to possible choices
iteration_results.append([sort, positions, new_input_sets])
# If we did not find a new contraction contract remaining
if len(iteration_results) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(iteration_results, key=lambda x: x[0])
path.append(best[1])
input_sets = best[2]
return path
def _parse_einsum_input(operands):
"""
A reproduction of the C-side einsum parsing in Python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b])
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(len(operands[num].shape), 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
# Build output string if does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure the number of operands matches the number of subscript terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize', 'einsum_call']
unknown_kwargs = [k for (k, v) in kwargs.items() if k
not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs:"
" %s" % unknown_kwargs)
# Figure out what the path really is
path_type = kwargs.pop('optimize', False)
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = kwargs.pop("einsum_call", False)
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d.",
input_subscripts[tnum], tnum)
for cnum, char in enumerate(term):
dim = sh[cnum]
if char in dimension_dict.keys():
if dimension_dict[char] != dim:
raise ValueError("Size of label '%s' for operand %d does "
"not match previous terms.", char, tnum)
else:
dimension_dict[char] = dim
# Compute size of each input array plus the output array
size_list = []
for term in input_list + [output_subscript]:
size_list.append(_compute_size_by_dict(term, dimension_dict))
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right; need to look into exactly how einsum does this
naive_cost = _compute_size_by_dict(indices, dimension_dict)
indices_in_input = input_subscripts.replace(',', '')
mult = max(len(input_list) - 1, 1)
if (len(indices_in_input) - len(set(indices_in_input))):
mult *= 2
naive_cost *= mult
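    # e.g. (illustrative) for 'ij,jk->ik' with every dimension of size n the
    # naive cost is 2 * n**3: n**3 index combinations over {i, j, k}, doubled
    # because the repeated index j is summed away.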
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
# Maximum memory should be at most out_size for this algorithm
memory_arg = min(memory_arg, max_size)
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found", path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _compute_size_by_dict(idx_contract, dimension_dict)
if idx_removed:
cost *= 2
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
# Rewrite einsum to handle different cases
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : {ndarray, None}, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Default is False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
See ``np.einsum_path`` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
# Grab non-einsum kwargs
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
    # Build the contraction list and operands
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
# If out was specified
if specified_out and ((num + 1) == len(contraction_list)):
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
        # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
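# Illustrative sketch (not part of the NumPy source): the subscript-string and
# operand/sublist calling conventions described in the docstring above are
# interchangeable, and ``optimize=True`` hands the contraction order over to
# einsum_path for calls with three or more operands. The helper name below is
# hypothetical and exists only for this example.
def _einsum_usage_sketch():
    import numpy as np
    a = np.ones((4, 5))
    b = np.ones((5, 6))
    c = np.ones((6, 7))
    # subscript-string form with an optimized contraction order
    r1 = np.einsum('ij,jk,kl->il', a, b, c, optimize=True)
    # equivalent operand/sublist form (explicit output sublist [0, 3])
    r2 = np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], [0, 3])
    assert np.allclose(r1, r2)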
|
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import md5
import unittest
import uuid
import shutil
import random
from collections import defaultdict
from test.probe.common import ECProbeTest
from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY
from swift.common.manager import Manager
from swift.obj.reconstructor import _get_partners
from swiftclient import client
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.total = total
self.hasher = md5()
self.size = 0
self.chunk = 'test' * 16 * 2 ** 10
@property
def etag(self):
return self.hasher.hexdigest()
def __iter__(self):
return self
def next(self):
if self.size > self.total:
raise StopIteration()
self.size += len(self.chunk)
self.hasher.update(self.chunk)
return self.chunk
    def __next__(self):
        return self.next()
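# Illustrative sketch (not part of the original test): Body streams fixed
# 64 KiB chunks until roughly ``total`` bytes have been produced, updating an
# md5 digest as it goes, so ``body.etag`` always matches the checksum of
# whatever a consumer has read from it. The helper below is hypothetical and
# only demonstrates that behaviour.
def _body_usage_sketch():
    body = Body(total=2 ** 20)
    streamed = md5()
    for chunk in body:
        streamed.update(chunk)
    assert streamed.hexdigest() == body.etag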
class TestReconstructorRebuild(ECProbeTest):
def setUp(self):
super(TestReconstructorRebuild, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
# sanity
self.assertEqual(self.policy.policy_type, EC_POLICY)
self.reconstructor = Manager(["object-reconstructor"])
def proxy_get(self):
# GET object
headers, body = client.get_object(self.url, self.token,
self.container_name,
self.object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5()
for chunk in body:
resp_checksum.update(chunk)
return resp_checksum.hexdigest()
def direct_get(self, node, part):
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
headers, data = direct_client.direct_get_object(
node, part, self.account, self.container_name,
self.object_name, headers=req_headers,
resp_chunk_size=64 * 2 ** 20)
hasher = md5()
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _check_node(self, node, part, etag, headers_post):
# get fragment archive etag
fragment_archive_etag = self.direct_get(node, part)
# remove data from the selected node
part_dir = self.storage_dir('object', node, part=part)
shutil.rmtree(part_dir, True)
        # this node can't serve the data any more
try:
self.direct_get(node, part)
except direct_client.DirectClientException as err:
self.assertEqual(err.http_status, 404)
else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (node,))
        # make sure we can still GET the object and that it is correct;
        # the proxy decodes the remaining fragments to rebuild it
self.assertEqual(etag, self.proxy_get())
# fire up reconstructor
self.reconstructor.once()
# fragment is rebuilt exactly as it was before!
self.assertEqual(fragment_archive_etag,
self.direct_get(node, part))
# check meta
meta = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
for key in headers_post:
self.assertTrue(key in meta)
self.assertEqual(meta[key], headers_post[key])
def _format_node(self, node):
return '%s#%s' % (node['device'], node['index'])
def test_main(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# PUT object
contents = Body()
headers = {'x-object-meta-foo': 'meta-foo'}
headers_post = {'x-object-meta-bar': 'meta-bar'}
etag = client.put_object(self.url, self.token,
self.container_name,
self.object_name,
contents=contents, headers=headers)
client.post_object(self.url, self.token, self.container_name,
self.object_name, headers=headers_post)
        del headers_post['X-Auth-Token']  # the client adds this header to the dict; drop it before comparing
        # build up a list of node lists to kill data from:
        # first try a single node, then adjacent nodes, and finally
        # nodes more than one node apart
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
single_node = [random.choice(onodes)]
adj_nodes = [onodes[0], onodes[-1]]
far_nodes = [onodes[0], onodes[-2]]
test_list = [single_node, adj_nodes, far_nodes]
for node_list in test_list:
for onode in node_list:
try:
self._check_node(onode, opart, etag, headers_post)
except AssertionError as e:
self.fail(
str(e) + '\n... for node %r of scenario %r' % (
self._format_node(onode),
[self._format_node(n) for n in node_list]))
def test_rebuild_partner_down(self):
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
# PUT object
contents = Body()
client.put_object(self.url, self.token,
self.container_name,
self.object_name,
contents=contents)
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
        # find a primary server that only has one of its devices in the
        # primary node list
group_nodes_by_config = defaultdict(list)
for n in onodes:
group_nodes_by_config[self.config_number(n)].append(n)
for config_number, node_list in group_nodes_by_config.items():
if len(node_list) == 1:
break
else:
self.fail('ring balancing did not use all available nodes')
primary_node = node_list[0]
        # pick one of its partners to fail at random
partner_node = random.choice(_get_partners(
primary_node['index'], onodes))
# 507 the partner device
device_path = self.device_dir('object', partner_node)
self.kill_drive(device_path)
# select another primary sync_to node to fail
failed_primary = [n for n in onodes if n['id'] not in
(primary_node['id'], partner_node['id'])][0]
        # ... capture its fragment etag
failed_primary_etag = self.direct_get(failed_primary, opart)
# ... and delete it
part_dir = self.storage_dir('object', failed_primary, part=opart)
shutil.rmtree(part_dir, True)
        # reconstruct from the primary, while one of its partners is 507'd
self.reconstructor.once(number=self.config_number(primary_node))
        # the other failed primary will get its fragment rebuilt instead
self.assertEqual(failed_primary_etag,
self.direct_get(failed_primary, opart))
# just to be nice
self.revive_drive(device_path)
if __name__ == "__main__":
unittest.main()
|
|
# -*- test-case-name: twisted.conch.test.test_conch -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""The lowest level SSH protocol. This handles the key negotiation, the encryption and the compression.
This module is unstable.
Maintainer: U{Paul Swartz<mailto:[email protected]>}
"""
from __future__ import nested_scopes
# base library imports
import struct
import md5
import sha
import zlib
import math # for math.log
import array
# external library imports
from Crypto import Util
from Crypto.Cipher import XOR
from Crypto.PublicKey import RSA
from Crypto.Util import randpool
# twisted imports
from twisted.conch import error
from twisted.internet import protocol, defer
from twisted.python import log
# sibling imports
from common import NS, getNS, MP, getMP, _MPpow, ffs, entropy # ease of use
import keys
class SSHTransportBase(protocol.Protocol):
protocolVersion = '2.0'
version = 'Twisted'
comment = ''
ourVersionString = ('SSH-'+protocolVersion+'-'+version+' '+comment).strip()
supportedCiphers = ['aes256-ctr', 'aes256-cbc', 'aes192-ctr', 'aes192-cbc',
'aes128-ctr', 'aes128-cbc', 'cast128-ctr',
                        'cast128-cbc', 'blowfish-ctr', 'blowfish-cbc', 'idea-ctr',
                        'idea-cbc', '3des-ctr', '3des-cbc'] # ,'none']
supportedMACs = ['hmac-sha1', 'hmac-md5'] # , 'none']
# both of the above support 'none', but for security are disabled by
# default. to enable them, subclass this class and add it, or do:
# SSHTransportBase.supportedCiphers.append('none')
supportedKeyExchanges = ['diffie-hellman-group-exchange-sha1',
'diffie-hellman-group1-sha1']
supportedPublicKeys = ['ssh-rsa', 'ssh-dss']
supportedCompressions = ['none', 'zlib']
supportedLanguages = ()
gotVersion = 0
ignoreNextPacket = 0
buf = ''
outgoingPacketSequence = 0
incomingPacketSequence = 0
currentEncryptions = None
outgoingCompression = None
incomingCompression = None
sessionID = None
isAuthorized = 0
service = None
def connectionLost(self, reason):
if self.service:
self.service.serviceStopped()
if hasattr(self, 'avatar'):
self.logoutFunction()
log.msg('connection lost')
def connectionMade(self):
self.transport.write('%s\r\n'%(self.ourVersionString))
self.sendKexInit()
def sendKexInit(self):
self.ourKexInitPayload = chr(MSG_KEXINIT)+entropy.get_bytes(16)+ \
NS(','.join(self.supportedKeyExchanges))+ \
NS(','.join(self.supportedPublicKeys))+ \
NS(','.join(self.supportedCiphers))+ \
NS(','.join(self.supportedCiphers))+ \
NS(','.join(self.supportedMACs))+ \
NS(','.join(self.supportedMACs))+ \
NS(','.join(self.supportedCompressions))+ \
NS(','.join(self.supportedCompressions))+ \
NS(','.join(self.supportedLanguages))+ \
NS(','.join(self.supportedLanguages))+ \
'\000'+'\000\000\000\000'
self.sendPacket(MSG_KEXINIT, self.ourKexInitPayload[1:])
def sendPacket(self, messageType, payload):
payload = chr(messageType)+payload
if self.outgoingCompression:
payload = self.outgoingCompression.compress(payload) + self.outgoingCompression.flush(2)
if self.currentEncryptions:
bs = self.currentEncryptions.enc_block_size
else:
bs = 8
totalSize = 5+len(payload)
lenPad = bs-(totalSize%bs)
if lenPad < 4:
lenPad = lenPad+bs
packet = struct.pack('!LB', totalSize+lenPad-4, lenPad)+ \
payload+entropy.get_bytes(lenPad)
assert len(packet)%bs == 0, '%s extra bytes in packet'%(len(packet)%bs)
if self.currentEncryptions:
encPacket = self.currentEncryptions.encrypt(packet) + self.currentEncryptions.makeMAC(self.outgoingPacketSequence, packet)
else:
encPacket = packet
self.transport.write(encPacket)
self.outgoingPacketSequence+=1
def getPacket(self):
bs = self.currentEncryptions and self.currentEncryptions.dec_block_size or 8
ms = self.currentEncryptions and self.currentEncryptions.verify_digest_size or 0
if len(self.buf) < bs: return # not enough data
if not hasattr(self, 'first'):
if self.currentEncryptions:
first = self.currentEncryptions.decrypt(self.buf[: bs])
else:
first = self.buf[: bs]
else:
first = self.first
del self.first
packetLen, randomLen = struct.unpack('!LB', first[: 5])
if packetLen > 1048576: # 1024 ** 2
self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, 'bad packet length %s'%packetLen)
return
if len(self.buf) < packetLen+4+ms:
self.first = first
return # not enough packet
if(packetLen+4)%bs != 0:
            self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, 'bad packet mod (%s%%%s == %s)'%(packetLen+4, bs, (packetLen+4)%bs))
return
encData, self.buf = self.buf[: 4+packetLen], self.buf[4+packetLen:]
if self.currentEncryptions:
packet = first+self.currentEncryptions.decrypt(encData[bs:])
else:
packet = encData
if len(packet) != 4+packetLen:
self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, 'bad packet length')
return
if ms:
macData, self.buf = self.buf[:ms], self.buf[ms:]
if not self.currentEncryptions.verify(self.incomingPacketSequence, packet, macData):
self.sendDisconnect(DISCONNECT_MAC_ERROR, 'bad MAC')
return
payload = packet[5: 4+packetLen-randomLen]
if self.incomingCompression:
try:
payload = self.incomingCompression.decompress(payload)
except zlib.error:
self.sendDisconnect(DISCONNECT_COMPRESSION_ERROR, 'compression error')
return
self.incomingPacketSequence+=1
return payload
def dataReceived(self, data):
self.buf = self.buf+data
if not self.gotVersion:
parts = self.buf.split('\n')
for p in parts:
if p[: 4] == 'SSH-':
self.gotVersion = 1
self.otherVersionString = p.strip()
if p.split('-')[1]not in('1.99', '2.0'): # bad version
self.sendDisconnect(DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED, 'bad version %s'%p.split('-')[1])
return
i = parts.index(p)
self.buf = '\n'.join(parts[i+1:])
packet = self.getPacket()
while packet:
messageNum = ord(packet[0])
if messageNum < 50:
messageType = messages[messageNum][4:]
f = getattr(self, 'ssh_%s'%messageType, None)
if f:
f(packet[1:])
else:
log.msg("couldn't handle %s"%messageType)
log.msg(repr(packet[1:]))
self.sendUnimplemented()
elif self.service:
log.callWithLogger(self.service, self.service.packetReceived,
ord(packet[0]), packet[1:])
else:
log.msg("couldn't handle %s"%messageNum)
log.msg(repr(packet[1:]))
self.sendUnimplemented()
packet = self.getPacket()
def ssh_DISCONNECT(self, packet):
reasonCode = struct.unpack('>L', packet[: 4])[0]
description, foo = getNS(packet[4:])
self.receiveError(reasonCode, description)
self.transport.loseConnection()
def ssh_IGNORE(self, packet): pass
def ssh_UNIMPLEMENTED(self, packet):
seqnum = struct.unpack('>L', packet)
self.receiveUnimplemented(seqnum)
def ssh_DEBUG(self, packet):
alwaysDisplay = ord(packet[0])
message, lang, foo = getNS(packet, 2)
self.receiveDebug(alwaysDisplay, message, lang)
def setService(self, service):
log.msg('starting service %s'%service.name)
if self.service:
self.service.serviceStopped()
self.service = service
service.transport = self
self.service.serviceStarted()
def sendDebug(self, message, alwaysDisplay = 0, language = ''):
self.sendPacket(MSG_DEBUG, chr(alwaysDisplay)+NS(message)+NS(language))
def sendIgnore(self, message):
self.sendPacket(MSG_IGNORE, NS(message))
def sendUnimplemented(self):
seqnum = self.incomingPacketSequence
self.sendPacket(MSG_UNIMPLEMENTED, struct.pack('!L', seqnum))
def sendDisconnect(self, reason, desc):
self.sendPacket(MSG_DISCONNECT, struct.pack('>L', reason)+NS(desc)+NS(''))
log.msg('Disconnecting with error, code %s\nreason: %s'%(reason, desc))
self.transport.loseConnection()
# client methods
def receiveError(self, reasonCode, description):
log.msg('Got remote error, code %s\nreason: %s'%(reasonCode, description))
def receiveUnimplemented(self, seqnum):
log.msg('other side unimplemented packet #%s'%seqnum)
def receiveDebug(self, alwaysDisplay, message, lang):
if alwaysDisplay:
log.msg('Remote Debug Message:', message)
def isEncrypted(self, direction = "out"):
"""direction must be in ["out", "in", "both"]
"""
if self.currentEncryptions == None:
return 0
elif direction == "out":
return bool(self.currentEncryptions.enc_block_size)
elif direction == "in":
return bool(self.currentEncryptions.dec_block_size)
elif direction == "both":
return self.isEncrypted("in") and self.isEncrypted("out")
else:
raise TypeError, 'direction must be "out", "in", or "both"'
def isVerified(self, direction = "out"):
"""direction must be in ["out", "in", "both"]
"""
if self.currentEncryptions == None:
return 0
elif direction == "out":
return self.currentEncryptions.outMAC != None
elif direction == "in":
            return self.currentEncryptions.inMAC != None
elif direction == "both":
return self.isVerified("in")and self.isVerified("out")
else:
raise TypeError, 'direction must be "out", "in", or "both"'
def loseConnection(self):
self.sendDisconnect(DISCONNECT_CONNECTION_LOST, "user closed connection")
class SSHServerTransport(SSHTransportBase):
isClient = 0
def ssh_KEXINIT(self, packet):
self.clientKexInitPayload = chr(MSG_KEXINIT)+packet
#cookie = packet[: 16] # taking this is useless
k = getNS(packet[16:], 10)
strings, rest = k[:-1], k[-1]
kexAlgs, keyAlgs, encCS, encSC, macCS, macSC, compCS, compSC, langCS, langSC = \
[s.split(',')for s in strings]
if ord(rest[0]): # first_kex_packet_follows
if kexAlgs[0] != self.supportedKeyExchanges[0]or \
keyAlgs[0] != self.supportedPublicKeys[0]or \
not ffs(encSC, self.supportedCiphers)or \
not ffs(encCS, self.supportedCiphers)or \
not ffs(macSC, self.supportedMACs)or \
not ffs(macCS, self.supportedMACs)or \
not ffs(compCS, self.supportedCompressions)or \
not ffs(compSC, self.supportedCompressions):
self.ignoreNextPacket = 1 # guess was wrong
self.kexAlg = ffs(kexAlgs, self.supportedKeyExchanges)
self.keyAlg = ffs(keyAlgs, self.supportedPublicKeys)
self.nextEncryptions = SSHCiphers(
ffs(encSC, self.supportedCiphers),
ffs(encCS, self.supportedCiphers),
ffs(macSC, self.supportedMACs),
ffs(macCS, self.supportedMACs),
)
self.outgoingCompressionType = ffs(compSC, self.supportedCompressions)
self.incomingCompressionType = ffs(compCS, self.supportedCompressions)
if None in(self.kexAlg, self.keyAlg, self.outgoingCompressionType, self.incomingCompressionType):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, "couldn't match all kex parts")
return
if None in self.nextEncryptions.__dict__.values():
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, "couldn't match all kex parts")
return
log.msg('kex alg, key alg: %s %s'%(self.kexAlg, self.keyAlg))
log.msg('server->client: %s %s %s'%(self.nextEncryptions.outCipType,
self.nextEncryptions.outMacType,
self.outgoingCompressionType))
log.msg('client->server: %s %s %s'%(self.nextEncryptions.inCipType,
self.nextEncryptions.inMacType,
self.incomingCompressionType))
def ssh_KEX_DH_GEX_REQUEST_OLD(self, packet):
if self.ignoreNextPacket:
self.ignoreNextPacket = 0
return
if self.kexAlg == 'diffie-hellman-group1-sha1': # this is really KEXDH_INIT
clientDHPubKey, foo = getMP(packet)
y = Util.number.getRandomNumber(16, entropy.get_bytes)
f = pow(DH_GENERATOR, y, DH_PRIME)
sharedSecret = _MPpow(clientDHPubKey, y, DH_PRIME)
h = sha.new()
h.update(NS(self.otherVersionString))
h.update(NS(self.ourVersionString))
h.update(NS(self.clientKexInitPayload))
h.update(NS(self.ourKexInitPayload))
h.update(NS(self.factory.publicKeys[self.keyAlg]))
h.update(MP(clientDHPubKey))
h.update(MP(f))
h.update(sharedSecret)
exchangeHash = h.digest()
self.sendPacket(MSG_KEXDH_REPLY, NS(self.factory.publicKeys[self.keyAlg])+ \
MP(f)+NS(keys.signData(self.factory.privateKeys[self.keyAlg], exchangeHash)))
self._keySetup(sharedSecret, exchangeHash)
elif self.kexAlg == 'diffie-hellman-group-exchange-sha1':
self.kexAlg = 'diffie-hellman-group-exchange-sha1-old'
self.ideal = struct.unpack('>L', packet)[0]
self.g, self.p = self.factory.getDHPrime(self.ideal)
self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p)+MP(self.g))
else:
raise error.ConchError('bad kexalg: %s'%self.kexAlg)
def ssh_KEX_DH_GEX_REQUEST(self, packet):
if self.ignoreNextPacket:
self.ignoreNextPacket = 0
return
self.min, self.ideal, self.max = struct.unpack('>3L', packet)
self.g, self.p = self.factory.getDHPrime(self.ideal)
self.sendPacket(MSG_KEX_DH_GEX_GROUP, MP(self.p)+MP(self.g))
def ssh_KEX_DH_GEX_INIT(self, packet):
clientDHPubKey, foo = getMP(packet)
# if y < 1024, openssh will reject us: "bad server public DH value".
# y<1024 means f will be short, and of the form 2^y, so an observer
# could trivially derive our secret y from f. Openssh detects this
# and complains, so avoid creating such values by requiring y to be
# larger than ln2(self.p)
# TODO: we should also look at the value they send to us and reject
# insecure values of f (if g==2 and f has a single '1' bit while the
# rest are '0's, then they must have used a small y also).
# TODO: This could be computed when self.p is set up
# or do as openssh does and scan f for a single '1' bit instead
minimum = long(math.floor(math.log(self.p) / math.log(2)) + 1)
tries = 0
pSize = Util.number.size(self.p)
y = Util.number.getRandomNumber(pSize, entropy.get_bytes)
while tries < 10 and y < minimum:
tries += 1
y = Util.number.getRandomNumber(pSize, entropy.get_bytes)
assert(y >= minimum) # TODO: test_conch just hangs if this is hit
        # the chance of it being hit is really, really low
f = pow(self.g, y, self.p)
sharedSecret = _MPpow(clientDHPubKey, y, self.p)
h = sha.new()
h.update(NS(self.otherVersionString))
h.update(NS(self.ourVersionString))
h.update(NS(self.clientKexInitPayload))
h.update(NS(self.ourKexInitPayload))
h.update(NS(self.factory.publicKeys[self.keyAlg]))
if self.kexAlg == 'diffie-hellman-group-exchange-sha1':
h.update(struct.pack('>3L', self.min, self.ideal, self.max))
else:
h.update(struct.pack('>L', self.ideal))
h.update(MP(self.p))
h.update(MP(self.g))
h.update(MP(clientDHPubKey))
h.update(MP(f))
h.update(sharedSecret)
exchangeHash = h.digest()
self.sendPacket(MSG_KEX_DH_GEX_REPLY, NS(self.factory.publicKeys[self.keyAlg])+ \
MP(f)+NS(keys.signData(self.factory.privateKeys[self.keyAlg], exchangeHash)))
self._keySetup(sharedSecret, exchangeHash)
def ssh_NEWKEYS(self, packet):
if packet != '':
self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, "NEWKEYS takes no data")
self.currentEncryptions = self.nextEncryptions
if self.outgoingCompressionType == 'zlib':
self.outgoingCompression = zlib.compressobj(6)
#self.outgoingCompression.compress = lambda x: self.outgoingCompression.compress(x) + self.outgoingCompression.flush(zlib.Z_SYNC_FLUSH)
if self.incomingCompressionType == 'zlib':
self.incomingCompression = zlib.decompressobj()
def ssh_SERVICE_REQUEST(self, packet):
service, rest = getNS(packet)
cls = self.factory.getService(self, service)
if not cls:
self.sendDisconnect(DISCONNECT_SERVICE_NOT_AVAILABLE, "don't have service %s"%service)
return
else:
self.sendPacket(MSG_SERVICE_ACCEPT, NS(service))
self.setService(cls())
def _keySetup(self, sharedSecret, exchangeHash):
if not self.sessionID:
self.sessionID = exchangeHash
initIVCS = self._getKey('A', sharedSecret, exchangeHash)
initIVSC = self._getKey('B', sharedSecret, exchangeHash)
encKeyCS = self._getKey('C', sharedSecret, exchangeHash)
encKeySC = self._getKey('D', sharedSecret, exchangeHash)
integKeyCS = self._getKey('E', sharedSecret, exchangeHash)
integKeySC = self._getKey('F', sharedSecret, exchangeHash)
self.nextEncryptions.setKeys(initIVSC, encKeySC, initIVCS, encKeyCS, integKeySC, integKeyCS)
self.sendPacket(MSG_NEWKEYS, '')
def _getKey(self, c, sharedSecret, exchangeHash):
k1 = sha.new(sharedSecret+exchangeHash+c+self.sessionID).digest()
k2 = sha.new(sharedSecret+exchangeHash+k1).digest()
return k1+k2
class SSHClientTransport(SSHTransportBase):
isClient = 1
def connectionMade(self):
SSHTransportBase.connectionMade(self)
self._gotNewKeys = 0
def ssh_KEXINIT(self, packet):
self.serverKexInitPayload = chr(MSG_KEXINIT)+packet
#cookie = packet[: 16] # taking this is unimportant
k = getNS(packet[16:], 10)
strings, rest = k[:-1], k[-1]
kexAlgs, keyAlgs, encCS, encSC, macCS, macSC, compCS, compSC, langCS, langSC = \
[s.split(',')for s in strings]
self.kexAlg = ffs(self.supportedKeyExchanges, kexAlgs)
self.keyAlg = ffs(self.supportedPublicKeys, keyAlgs)
self.nextEncryptions = SSHCiphers(
ffs(self.supportedCiphers, encCS),
ffs(self.supportedCiphers, encSC),
ffs(self.supportedMACs, macCS),
ffs(self.supportedMACs, macSC),
)
self.outgoingCompressionType = ffs(self.supportedCompressions, compCS)
self.incomingCompressionType = ffs(self.supportedCompressions, compSC)
if None in(self.kexAlg, self.keyAlg, self.outgoingCompressionType, self.incomingCompressionType):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, "couldn't match all kex parts")
return
if None in self.nextEncryptions.__dict__.values():
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, "couldn't match all kex parts")
return
log.msg('kex alg, key alg: %s %s'%(self.kexAlg, self.keyAlg))
log.msg('client->server: %s %s %s'%(self.nextEncryptions.outCipType,
self.nextEncryptions.outMacType,
self.outgoingCompressionType))
log.msg('server->client: %s %s %s'%(self.nextEncryptions.inCipType,
self.nextEncryptions.inMacType,
self.incomingCompressionType))
if self.kexAlg == 'diffie-hellman-group1-sha1':
self.x = Util.number.getRandomNumber(512, entropy.get_bytes)
self.DHpubKey = pow(DH_GENERATOR, self.x, DH_PRIME)
self.sendPacket(MSG_KEXDH_INIT, MP(self.DHpubKey))
else:
self.sendPacket(MSG_KEX_DH_GEX_REQUEST_OLD, '\x00\x00\x08\x00')
def ssh_KEX_DH_GEX_GROUP(self, packet):
if self.kexAlg == 'diffie-hellman-group1-sha1':
pubKey, packet = getNS(packet)
f, packet = getMP(packet)
signature, packet = getNS(packet)
fingerprint = ':'.join(map(lambda c: '%02x'%ord(c), md5.new(pubKey).digest()))
d = self.verifyHostKey(pubKey, fingerprint)
d.addCallback(self._continueGEX_GROUP, pubKey, f, signature)
d.addErrback(lambda unused,self=self:self.sendDisconnect(DISCONNECT_HOST_KEY_NOT_VERIFIABLE, 'bad host key'))
else:
self.p, rest = getMP(packet)
self.g, rest = getMP(rest)
self.x = getMP('\x00\x00\x00\x40'+entropy.get_bytes(64))[0]
self.DHpubKey = pow(self.g, self.x, self.p)
self.sendPacket(MSG_KEX_DH_GEX_INIT, MP(self.DHpubKey))
def _continueGEX_GROUP(self, ignored, pubKey, f, signature):
serverKey = keys.getPublicKeyObject(pubKey)
sharedSecret = _MPpow(f, self.x, DH_PRIME)
h = sha.new()
h.update(NS(self.ourVersionString))
h.update(NS(self.otherVersionString))
h.update(NS(self.ourKexInitPayload))
h.update(NS(self.serverKexInitPayload))
h.update(NS(pubKey))
h.update(MP(self.DHpubKey))
h.update(MP(f))
h.update(sharedSecret)
exchangeHash = h.digest()
if not keys.verifySignature(serverKey, signature, exchangeHash):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, 'bad signature')
return
self._keySetup(sharedSecret, exchangeHash)
def ssh_KEX_DH_GEX_REPLY(self, packet):
pubKey, packet = getNS(packet)
f, packet = getMP(packet)
signature, packet = getNS(packet)
fingerprint = ':'.join(map(lambda c: '%02x'%ord(c), md5.new(pubKey).digest()))
d = self.verifyHostKey(pubKey, fingerprint)
d.addCallback(self._continueGEX_REPLY, pubKey, f, signature)
d.addErrback(lambda unused, self=self: self.sendDisconnect(DISCONNECT_HOST_KEY_NOT_VERIFIABLE, 'bad host key'))
def _continueGEX_REPLY(self, ignored, pubKey, f, signature):
serverKey = keys.getPublicKeyObject(pubKey)
sharedSecret = _MPpow(f, self.x, self.p)
h = sha.new()
h.update(NS(self.ourVersionString))
h.update(NS(self.otherVersionString))
h.update(NS(self.ourKexInitPayload))
h.update(NS(self.serverKexInitPayload))
h.update(NS(pubKey))
h.update('\x00\x00\x08\x00')
h.update(MP(self.p))
h.update(MP(self.g))
h.update(MP(self.DHpubKey))
h.update(MP(f))
h.update(sharedSecret)
exchangeHash = h.digest()
if not keys.verifySignature(serverKey, signature, exchangeHash):
self.sendDisconnect(DISCONNECT_KEY_EXCHANGE_FAILED, 'bad signature')
return
self._keySetup(sharedSecret, exchangeHash)
def _keySetup(self, sharedSecret, exchangeHash):
if not self.sessionID:
self.sessionID = exchangeHash
initIVCS = self._getKey('A', sharedSecret, exchangeHash)
initIVSC = self._getKey('B', sharedSecret, exchangeHash)
encKeyCS = self._getKey('C', sharedSecret, exchangeHash)
encKeySC = self._getKey('D', sharedSecret, exchangeHash)
integKeyCS = self._getKey('E', sharedSecret, exchangeHash)
integKeySC = self._getKey('F', sharedSecret, exchangeHash)
self.nextEncryptions.setKeys(initIVCS, encKeyCS, initIVSC, encKeySC, integKeyCS, integKeySC)
self.sendPacket(MSG_NEWKEYS, '')
if self._gotNewKeys:
self.ssh_NEWKEYS('')
def _getKey(self, c, sharedSecret, exchangeHash):
k1 = sha.new(sharedSecret+exchangeHash+c+self.sessionID).digest()
k2 = sha.new(sharedSecret+exchangeHash+k1).digest()
return k1+k2
def ssh_NEWKEYS(self, packet):
if packet != '':
self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, "NEWKEYS takes no data")
if not self.nextEncryptions.enc_block_size:
self._gotNewKeys = 1
return
self.currentEncryptions = self.nextEncryptions
if self.outgoingCompressionType == 'zlib':
self.outgoingCompression = zlib.compressobj(6)
#self.outgoingCompression.compress = lambda x: self.outgoingCompression.compress(x) + self.outgoingCompression.flush(zlib.Z_SYNC_FLUSH)
if self.incomingCompressionType == 'zlib':
self.incomingCompression = zlib.decompressobj()
self.connectionSecure()
def ssh_SERVICE_ACCEPT(self, packet):
name = getNS(packet)[0]
if name != self.instance.name:
self.sendDisconnect(DISCONNECT_PROTOCOL_ERROR, "received accept for service we did not request")
self.setService(self.instance)
def requestService(self, instance):
"""
Request that a service be run over this transport.
@type instance: subclass of L{twisted.conch.ssh.service.SSHService}
"""
self.sendPacket(MSG_SERVICE_REQUEST, NS(instance.name))
self.instance = instance
# client methods
def verifyHostKey(self, hostKey, fingerprint):
"""Returns a Deferred that gets a callback if it is a valid key, or
an errback if not.
@type hostKey: C{str}
@type fingerprint: C{str}
@rtype: L{Deferred}
"""
# return if it's good
return defer.fail(NotImplementedError)
def connectionSecure(self):
"""
Called when the encryption has been set up. Generally,
requestService() is called to run another service over the transport.
"""
raise NotImplementedError
class _DummyCipher:
block_size = 1
def encrypt(self, x):
return x
decrypt = encrypt
class SSHCiphers:
cipherMap = {
'3des-cbc':('DES3', 24, 0),
'blowfish-cbc':('Blowfish', 16,0 ),
'aes256-cbc':('AES', 32, 0),
'aes192-cbc':('AES', 24, 0),
'aes128-cbc':('AES', 16, 0),
'arcfour':('ARC4', 16, 0),
'idea-cbc':('IDEA', 16, 0),
'cast128-cbc':('CAST', 16, 0),
'aes128-ctr':('AES', 16, 1),
'aes192-ctr':('AES', 24, 1),
'aes256-ctr':('AES', 32, 1),
'3des-ctr':('DES3', 24, 1),
'blowfish-ctr':('Blowfish', 16, 1),
'idea-ctr':('IDEA', 16, 1),
'cast128-ctr':('CAST', 16, 1),
'none':(None, 0, 0),
}
macMap = {
'hmac-sha1': 'sha',
'hmac-md5': 'md5',
'none':None
}
def __init__(self, outCip, inCip, outMac, inMac):
self.outCipType = outCip
self.inCipType = inCip
self.outMacType = outMac
self.inMacType = inMac
self.enc_block_size = 0
self.dec_block_size = 0
def setKeys(self, outIV, outKey, inIV, inKey, outInteg, inInteg):
o = self._getCipher(self.outCipType, outIV, outKey)
self.encrypt = o.encrypt
self.enc_block_size = o.block_size
o = self._getCipher(self.inCipType, inIV, inKey)
self.decrypt = o.decrypt
self.dec_block_size = o.block_size
self.outMAC = self._getMAC(self.outMacType, outInteg)
self.inMAC = self._getMAC(self.inMacType, inInteg)
self.verify_digest_size = self.inMAC[3]
def _getCipher(self, cip, iv, key):
modName, keySize, counterMode = self.cipherMap[cip]
if not modName: # no cipher
return _DummyCipher()
mod = __import__('Crypto.Cipher.%s'%modName, {}, {}, 'x')
if counterMode:
return mod.new(key[:keySize], mod.MODE_CTR, iv[:mod.block_size], counter=_Counter(iv, mod.block_size))
else:
return mod.new(key[: keySize], mod.MODE_CBC, iv[: mod.block_size])
def _getMAC(self, mac, key):
modName = self.macMap[mac]
if not modName:
return None
mod = __import__(modName, {}, {}, '')
if not hasattr(mod, 'digest_size'):
ds = len(mod.new().digest())
else:
ds = mod.digest_size
key = key[: ds]+'\x00'*(64-ds)
i = XOR.new('\x36').encrypt(key)
o = XOR.new('\x5c').encrypt(key)
return mod, i,o, ds
def encrypt(self, blocks):
return blocks
def decrypt(self, blocks):
return blocks
def makeMAC(self, seqid, data):
if not self.outMAC: return ''
data = struct.pack('>L', seqid)+data
mod, i, o, ds = self.outMAC
inner = mod.new(i+data)
outer = mod.new(o+inner.digest())
return outer.digest()
def verify(self, seqid, data, mac):
if not self.inMAC:
return mac == ''
data = struct.pack('>L', seqid)+data
mod, i,o, ds = self.inMAC
inner = mod.new(i+data)
outer = mod.new(o+inner.digest())
return mac == outer.digest()
class _Counter:
"""
Stateful counter which returns results packed in a byte string
"""
def __init__(self, initialVector, blockSize):
"""
@type initialVector: C{str}
@param initialVector: A byte string representing the initial counter value.
@type blockSize: C{int}
@param blockSize: The length of the output buffer, as well as the
number of bytes at the beginning of C{initialVector} to consider.
"""
initialVector = initialVector[:blockSize]
self.count = getMP('\xff\xff\xff\xff' + initialVector)[0]
self.blockSize = blockSize
self.count = Util.number.long_to_bytes(self.count - 1)
self.count = '\x00' * (self.blockSize - len(self.count)) + self.count
self.count = array.array('c', self.count)
self.len = len(self.count) - 1
def __call__(self):
"""
Increment the counter and return the new value.
"""
i = self.len
while i > -1:
self.count[i] = n = chr((ord(self.count[i]) + 1) % 256)
if n == '\x00':
i -= 1
else:
return self.count.tostring()
self.count = array.array('c', '\x00' * self.blockSize)
return self.count.tostring()
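# Illustrative sketch (not part of the original module): each call to a
# _Counter returns the next counter value as a ``blockSize``-byte big-endian
# string, wrapping around to zero on overflow. Because __init__ pre-decrements
# the stored value, the first call yields the initial vector itself. The
# helper name is hypothetical.
def _counter_usage_sketch():
    ctr = _Counter('\x00' * 15 + '\x01', 16)
    assert ctr() == '\x00' * 15 + '\x01'
    assert ctr() == '\x00' * 15 + '\x02'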
def buffer_dump(b, title = None):
r = title or ''
while b:
c, b = b[: 16], b[16:]
while c:
a, c = c[: 2], c[2:]
if len(a) == 2:
r = r+'%02x%02x '%(ord(a[0]), ord(a[1]))
else:
r = r+'%02x'%ord(a[0])
r = r+'\n'
return r
DH_PRIME = 179769313486231590770839156793787453197860296048756011706444423684197180216158519368947833795864925541502180565485980503646440548199239100050792877003355816639229553136239076508735759914822574862575007425302077447712589550957937778424442426617334727629299387668709205606050270810842907692932019128194467627007L
DH_GENERATOR = 2L
MSG_DISCONNECT = 1
MSG_IGNORE = 2
MSG_UNIMPLEMENTED = 3
MSG_DEBUG = 4
MSG_SERVICE_REQUEST = 5
MSG_SERVICE_ACCEPT = 6
MSG_KEXINIT = 20
MSG_NEWKEYS = 21
MSG_KEXDH_INIT = 30
MSG_KEXDH_REPLY = 31
MSG_KEX_DH_GEX_REQUEST_OLD = 30
MSG_KEX_DH_GEX_REQUEST = 34
MSG_KEX_DH_GEX_GROUP = 31
MSG_KEX_DH_GEX_INIT = 32
MSG_KEX_DH_GEX_REPLY = 33
DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT = 1
DISCONNECT_PROTOCOL_ERROR = 2
DISCONNECT_KEY_EXCHANGE_FAILED = 3
DISCONNECT_RESERVED = 4
DISCONNECT_MAC_ERROR = 5
DISCONNECT_COMPRESSION_ERROR = 6
DISCONNECT_SERVICE_NOT_AVAILABLE = 7
DISCONNECT_PROTOCOL_VERSION_NOT_SUPPORTED = 8
DISCONNECT_HOST_KEY_NOT_VERIFIABLE = 9
DISCONNECT_CONNECTION_LOST = 10
DISCONNECT_BY_APPLICATION = 11
DISCONNECT_TOO_MANY_CONNECTIONS = 12
DISCONNECT_AUTH_CANCELLED_BY_USER = 13
DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE = 14
DISCONNECT_ILLEGAL_USER_NAME = 15
messages = {}
for name, value in globals().items():
if name.startswith('MSG_'):
messages[value] = name
|
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.transient_error import TransientError # noqa: F401,E501
from orcid_api_v3.models.transient_non_empty_string import TransientNonEmptyString # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc1 import UrlV30Rc1 # noqa: F401,E501
class ExternalIDV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'external_id_type': 'str',
'external_id_value': 'str',
'external_id_normalized': 'TransientNonEmptyString',
'external_id_normalized_error': 'TransientError',
'external_id_url': 'UrlV30Rc1',
'external_id_relationship': 'str'
}
attribute_map = {
'external_id_type': 'external-id-type',
'external_id_value': 'external-id-value',
'external_id_normalized': 'external-id-normalized',
'external_id_normalized_error': 'external-id-normalized-error',
'external_id_url': 'external-id-url',
'external_id_relationship': 'external-id-relationship'
}
def __init__(self, external_id_type=None, external_id_value=None, external_id_normalized=None, external_id_normalized_error=None, external_id_url=None, external_id_relationship=None): # noqa: E501
"""ExternalIDV30Rc1 - a model defined in Swagger""" # noqa: E501
self._external_id_type = None
self._external_id_value = None
self._external_id_normalized = None
self._external_id_normalized_error = None
self._external_id_url = None
self._external_id_relationship = None
self.discriminator = None
self.external_id_type = external_id_type
self.external_id_value = external_id_value
if external_id_normalized is not None:
self.external_id_normalized = external_id_normalized
if external_id_normalized_error is not None:
self.external_id_normalized_error = external_id_normalized_error
if external_id_url is not None:
self.external_id_url = external_id_url
if external_id_relationship is not None:
self.external_id_relationship = external_id_relationship
@property
def external_id_type(self):
"""Gets the external_id_type of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_type of this ExternalIDV30Rc1. # noqa: E501
:rtype: str
"""
return self._external_id_type
@external_id_type.setter
def external_id_type(self, external_id_type):
"""Sets the external_id_type of this ExternalIDV30Rc1.
:param external_id_type: The external_id_type of this ExternalIDV30Rc1. # noqa: E501
:type: str
"""
if external_id_type is None:
raise ValueError("Invalid value for `external_id_type`, must not be `None`") # noqa: E501
self._external_id_type = external_id_type
@property
def external_id_value(self):
"""Gets the external_id_value of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_value of this ExternalIDV30Rc1. # noqa: E501
:rtype: str
"""
return self._external_id_value
@external_id_value.setter
def external_id_value(self, external_id_value):
"""Sets the external_id_value of this ExternalIDV30Rc1.
:param external_id_value: The external_id_value of this ExternalIDV30Rc1. # noqa: E501
:type: str
"""
if external_id_value is None:
raise ValueError("Invalid value for `external_id_value`, must not be `None`") # noqa: E501
self._external_id_value = external_id_value
@property
def external_id_normalized(self):
"""Gets the external_id_normalized of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_normalized of this ExternalIDV30Rc1. # noqa: E501
:rtype: TransientNonEmptyString
"""
return self._external_id_normalized
@external_id_normalized.setter
def external_id_normalized(self, external_id_normalized):
"""Sets the external_id_normalized of this ExternalIDV30Rc1.
:param external_id_normalized: The external_id_normalized of this ExternalIDV30Rc1. # noqa: E501
:type: TransientNonEmptyString
"""
self._external_id_normalized = external_id_normalized
@property
def external_id_normalized_error(self):
"""Gets the external_id_normalized_error of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_normalized_error of this ExternalIDV30Rc1. # noqa: E501
:rtype: TransientError
"""
return self._external_id_normalized_error
@external_id_normalized_error.setter
def external_id_normalized_error(self, external_id_normalized_error):
"""Sets the external_id_normalized_error of this ExternalIDV30Rc1.
:param external_id_normalized_error: The external_id_normalized_error of this ExternalIDV30Rc1. # noqa: E501
:type: TransientError
"""
self._external_id_normalized_error = external_id_normalized_error
@property
def external_id_url(self):
"""Gets the external_id_url of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_url of this ExternalIDV30Rc1. # noqa: E501
:rtype: UrlV30Rc1
"""
return self._external_id_url
@external_id_url.setter
def external_id_url(self, external_id_url):
"""Sets the external_id_url of this ExternalIDV30Rc1.
:param external_id_url: The external_id_url of this ExternalIDV30Rc1. # noqa: E501
:type: UrlV30Rc1
"""
self._external_id_url = external_id_url
@property
def external_id_relationship(self):
"""Gets the external_id_relationship of this ExternalIDV30Rc1. # noqa: E501
:return: The external_id_relationship of this ExternalIDV30Rc1. # noqa: E501
:rtype: str
"""
return self._external_id_relationship
@external_id_relationship.setter
def external_id_relationship(self, external_id_relationship):
"""Sets the external_id_relationship of this ExternalIDV30Rc1.
:param external_id_relationship: The external_id_relationship of this ExternalIDV30Rc1. # noqa: E501
:type: str
"""
allowed_values = ["PART_OF", "SELF"] # noqa: E501
if external_id_relationship not in allowed_values:
raise ValueError(
"Invalid value for `external_id_relationship` ({0}), must be one of {1}" # noqa: E501
.format(external_id_relationship, allowed_values)
)
self._external_id_relationship = external_id_relationship
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExternalIDV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExternalIDV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
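# Illustrative usage sketch (not part of the generated client): build an
# external identifier and serialise it back to a plain dict. The DOI used
# here is made up purely for the example, and the helper name is hypothetical.
def _external_id_usage_sketch():
    ext_id = ExternalIDV30Rc1(
        external_id_type='doi',
        external_id_value='10.1000/example-doi',
        external_id_relationship='SELF',
    )
    data = ext_id.to_dict()
    assert data['external_id_value'] == '10.1000/example-doi'
    assert data['external_id_relationship'] == 'SELF'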
|
|
from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import PackageFinder
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.utils.deprecation import RemovedInPip7Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, version, typ in self.find_packages_latest_versions(options):
if version > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s [%s])',
dist.project_name, dist.version, version, typ,
)
def find_packages_latest_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
for dist in installed_packages:
req = InstallRequirement.from_line(
dist.key, None, isolated=options.isolated_mode,
)
typ = 'unknown'
try:
link = finder.find_requirement(req, True)
# If link is None, means installed version is most
# up-to-date
if link is None:
continue
except DistributionNotFound:
continue
else:
remote_version = finder._link_package_versions(
link, req.name
).version
if link.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
yield dist, remote_version, typ
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, version, typ in self.find_packages_latest_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
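# Illustrative sketch (not part of the pip source): the options registered in
# __init__ select one of the run_* methods above, e.g.
#
#   pip list                  -> run_listing()
#   pip list --outdated       -> run_outdated()
#   pip list --uptodate       -> run_uptodate()
#   pip list --editable       -> run_editables()
#   pip list --outdated --pre -> run_outdated(), also considering
#                                pre-release and development versions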
|
|
from __future__ import unicode_literals
import os
import shutil
from git.cmd import Git
import dvc.logger as logger
from dvc.exceptions import DvcException
from dvc.stage import Stage
from dvc.temp_git_repo import TempGitRepo
class PackageParams(object):
    def __init__(self, address, target_dir, select=None, file=None):
        self.address = address
        self.target_dir = target_dir
        self.select = select or []
        self.file = file
@property
def all_addresses(self):
if self.address:
return [self.address]
return PackageManager.read_packages()
class PackageManager(object):
PACKAGE_FILE = "package.yaml"
@staticmethod
def read_packages():
return []
@staticmethod
def get_package(addr):
for pkg_class in [GitPackage]:
try:
return pkg_class()
except Exception:
pass
return None
def __init__(self, addr):
self._addr = addr
class Package(object):
MODULES_DIR = "dvc_mod"
def install_or_update(self, parent_repo, pkg_param):
raise NotImplementedError(
"A method of abstract Package class was called"
)
def is_in_root(self):
return True
class GitPackage(Package):
DEF_DVC_FILE_PREFIX = "mod_"
def install_or_update(self, parent_repo, pkg_params):
if not self.is_in_root():
raise DvcException(
"This command can be run only from a repository root"
)
if not os.path.exists(self.MODULES_DIR):
logger.debug("Creating modules dir {}".format(self.MODULES_DIR))
os.makedirs(self.MODULES_DIR)
parent_repo.scm.ignore(os.path.abspath(self.MODULES_DIR))
module_name = (
Git.polish_url(pkg_params.address).strip("/").split("/")[-1]
)
if not module_name:
raise DvcException(
"Package address error: unable to extract package name"
)
with TempGitRepo(
pkg_params.address, module_name, Package.MODULES_DIR
) as tmp_repo:
outputs_to_copy = tmp_repo.outs
if pkg_params.select:
outputs_to_copy = list(
filter(
lambda out: out.dvc_path in pkg_params.select,
outputs_to_copy,
)
)
fetched_stage_files = set(
map(lambda o: o.stage.path, outputs_to_copy)
)
tmp_repo.fetch(fetched_stage_files)
module_dir = self.create_module_dir(module_name)
tmp_repo.persist_to(module_dir, parent_repo)
dvc_file = self.get_dvc_file_name(
pkg_params.file, pkg_params.target_dir, module_name
)
try:
self.persist_stage_and_scm_state(
parent_repo,
outputs_to_copy,
pkg_params.target_dir,
dvc_file,
)
except Exception as ex:
raise DvcException(
"Package '{}' was installed "
"but stage file '{}' "
"was not created properly: {}".format(
pkg_params.address, dvc_file, ex
)
)
parent_repo.checkout(dvc_file)
@staticmethod
def persist_stage_and_scm_state(
parent_repo, outputs_to_copy, target_dir, dvc_file
):
stage = Stage.create(
repo=parent_repo,
fname=dvc_file,
validate_state=False,
wdir=target_dir,
)
stage.outs = list(
map(lambda o: o.assign_to_stage_file(stage), outputs_to_copy)
)
for out in stage.outs:
parent_repo.scm.ignore(out.path, in_curr_dir=True)
stage.dump()
@staticmethod
def create_module_dir(module_name):
module_dir = os.path.join(GitPackage.MODULES_DIR, module_name)
if os.path.exists(module_dir):
logger.info("Updating package {}".format(module_name))
shutil.rmtree(module_dir)
else:
logger.info("Adding package {}".format(module_name))
return module_dir
def get_dvc_file_name(self, stage_file, target_dir, module_name):
if stage_file:
dvc_file_path = stage_file
else:
dvc_file_name = self.DEF_DVC_FILE_PREFIX + module_name + ".dvc"
dvc_file_path = os.path.join(target_dir, dvc_file_name)
return dvc_file_path
def install_pkg(self, pkg_params):
"""
Install package.
The command can be run only from DVC project root.
E.g.
Having: DVC package in https://github.com/dmpetrov/tag_classifier
$ dvc pkg install https://github.com/dmpetrov/tag_classifier
Result: tag_classifier package in dvc_mod/ directory
"""
if not os.path.isdir(pkg_params.target_dir):
logger.error(
"Unable to install package: "
"target directory '{}' does not exist".format(
pkg_params.target_dir
)
)
return 1
curr_dir = os.path.realpath(os.curdir)
if not os.path.realpath(pkg_params.target_dir).startswith(curr_dir):
logger.error(
"Unable to install package: the current dir should be"
" a subdirectory of the target dir {}".format(
pkg_params.target_dir
)
)
return 1
for addr in pkg_params.all_addresses:
try:
mgr = PackageManager.get_package(addr)
mgr.install_or_update(self, pkg_params)
except Exception as ex:
logger.error("Unable to install package: ".format(ex))
return 1
return 0
|
|
# -*- coding: utf-8 -*-
import re
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pandas import IntervalIndex, MultiIndex, RangeIndex
from pandas.compat import lrange, range
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_values_boxed():
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
    # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert index.is_unique is False
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def take_invalid_kwargs():
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),
columns=['dest'])
with pytest.raises(KeyError):
df_below_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_below_1000000.loc[(3, 0), 'dest']
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),
columns=['dest'])
with pytest.raises(KeyError):
df_above_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_above_1000000.loc[(3, 0), 'dest']
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame({'a': r, 'b': r},
index=pd.MultiIndex.from_tuples([(x, x) for x in r]))
with tm.assert_raises_regex(AttributeError,
"'Series' object has no attribute 'foo'"):
df['a'].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, labels = idx.levels, idx.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = idx.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
df.index.names = ['fizz', 'buzz']
str(df)
expected = pd.DataFrame({'bar': np.arange(100),
'foo': np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)],
names=['fizz', 'buzz']))
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values('fizz')
expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
tm.assert_index_equal(result, expected)
result = df.index.get_level_values('buzz')
expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')
tm.assert_index_equal(result, expected)
def test_hash_error(indices):
index = indices
tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__, hash, indices)
def test_mutability(indices):
if not len(indices):
return
pytest.raises(TypeError, indices.__setitem__, 0, indices[0])
def test_wrong_number_names(indices):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
tm.assert_raises_regex(ValueError, "^Length", testit, indices)
def test_memory_usage(idx):
result = idx.memory_usage()
if len(idx):
idx.get_loc(idx[0])
result2 = idx.memory_usage()
result3 = idx.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(idx, (RangeIndex, IntervalIndex)):
assert result2 > result
if idx.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_nlevels(idx):
assert idx.nlevels == 2
|
|
# -*- coding: utf8 -*-
from scrapy.spider import BaseSpider
from scrapy.selector import Selector, XmlXPathSelector
import re
from scrapy import Request
import urllib
import datetime
from vegi.items import VegiItem
import os
class VegiSpider(BaseSpider):
name = "vegi"
allowed_domains = ["earthexplorer.usgs.gov"]
#N = 10
#base = datetime.datetime(2016, 3, 26)
#date_list = [ ( base - datetime.timedelta(days=x*5), base - datetime.timedelta(days=x*5) - datetime.timedelta(days=10) ) for x in range(0, N) ]
#ids = map(lambda x: x[0].strftime("%Y%m%d") + x[1].strftime("%Y%m%d"), date_list)
#urls = map(lambda x: "http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT{0}.jpg".format(x), ids) + map(lambda x: "http://earthexplorer.usgs.gov/metadata/fgdc/4700/EMAFT{0}/".format(x), ids)
#start_urls = ["http://earthexplorer.usgs.gov"]
start_urls = ["http://earthexplorer.usgs.gov"]
urls = [
"http://earthexplorer.usgs.gov/browse/emodis/AF/2012/EMAFT2012122620130105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013010120130110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013010620130115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013011120130120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013011620130125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013012120130131.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013012620130205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013020120130210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013020620130215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013021120130220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013021620130225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013022120130228.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013022620130305.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013030120130310.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013030620130315.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013031120130320.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013031620130325.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013032120130331.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013032620130405.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013040120130410.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013040620130415.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013041120130420.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013041620130425.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013042120130430.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013042620130505.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013050120130510.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013050620130515.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013051120130520.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013051620130525.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013052120130531.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013052620130605.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013060120130610.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013060620130615.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013061120130620.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013061620130625.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013062120130630.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013062620130705.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013070120130710.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013070620130715.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013071120130720.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013071620130725.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013072120130731.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013072620130805.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013080120130810.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013080620130815.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013081120130820.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013081620130825.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013082120130831.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013082620130905.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013090120130910.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013090620130915.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013091120130920.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013091620130925.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013092120130930.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013092620131005.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013100120131010.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013100620131015.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013101120131020.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013101620131025.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013102120131031.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013102620131105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013110120131110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013110620131115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013111120131120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013111620131125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013112120131130.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013112620131205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013120120131210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013120620131215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013121120131220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013121620131225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013122120131231.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2013/EMAFT2013122620140105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014010120140110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014010620140115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014011120140120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014011620140125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014012120140131.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014012620140205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014020120140210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014020620140215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014021120140220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014021620140225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014022120140228.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014022620140305.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014030120140310.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014030620140315.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014031120140320.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014031620140325.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014032120140331.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014032620140405.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014040120140410.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014040620140415.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014041120140420.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014041620140425.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014042120140430.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014042620140505.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014050120140510.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014050620140515.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014051120140520.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014051620140525.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014052120140531.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014052620140605.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014060120140610.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014060620140615.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014061120140620.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014061620140625.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014062120140630.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014062620140705.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014070120140710.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014070620140715.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014071120140720.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014071620140725.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014072120140731.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014072620140805.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014080120140810.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014080620140815.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014081120140820.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014081620140825.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014082120140831.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014082620140905.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014090120140910.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014090620140915.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014091120140920.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014091620140925.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014092120140930.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014092620141005.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014100120141010.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014100620141015.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014101120141020.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014101620141025.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014102120141031.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014102620141105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014110120141110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014110620141115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014111120141120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014111620141125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014112120141130.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014112620141205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014120120141210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014120620141215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014121120141220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014121620141225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014122120141231.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2014/EMAFT2014122620150105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015010120150110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015010620150115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015011120150120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015011620150125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015012120150131.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015012620150205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015020120150210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015020620150215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015021120150220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015021620150225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015022120150228.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015022620150305.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015030120150310.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015030620150315.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015031120150320.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015031620150325.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015032120150331.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015032620150405.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015040120150410.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015040620150415.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015041120150420.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015041620150425.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015042120150430.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015042620150505.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015050120150510.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015050620150515.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015051120150520.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015051620150525.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015052120150531.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015052620150605.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015060120150610.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015060620150615.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015061120150620.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015061620150625.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015062120150630.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015062620150705.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015070120150710.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015070620150715.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015071120150720.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015071620150725.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015072620150805.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015080120150810.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015080620150815.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015081120150820.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015081620150825.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015082120150831.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015082620150905.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015102120151031.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015102620151105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015110120151110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015110620151115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015111120151120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015111620151125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015112120151130.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015112620151205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015120120151210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015120620151215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015121120151220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015121620151225.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015122120151231.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2015/EMAFT2015122620160105.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016010120160110.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016010620160115.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016011120160120.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016011620160125.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016012120160131.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016012620160205.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016020120160210.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016020620160215.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016021120160220.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016030120160310.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016030620160315.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016031120160320.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016031620160325.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016032120160331.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016032620160405.jpg",
"http://earthexplorer.usgs.gov/browse/emodis/AF/2016/EMAFT2016040120160410.jpg"
]
FILES_STORE = "D:/Workspace/NASA/data"
def save_image(self, response):
path = os.path.join(self.FILES_STORE, re.match("^.*(EMAFT\d{16}\.jpg)$", response.url).group(1))
with open(path, "wb") as f:
f.write(response.body)
def parse(self, response):
for url in self.urls:
yield Request(url, callback=self.save_image)
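# Illustrative sketch only (not used by the spider): the commented-out date
# arithmetic near the top of this class suggests the hard-coded URL list could
# be generated instead. The real product IDs use roughly 10-day windows stepped
# by 5 days with month-end adjustments (e.g. ...0121...0131), so this
# hypothetical helper is only approximate.
def build_emaft_urls(start, periods):
    """Build browse URLs of the assumed form .../AF/<year>/EMAFT<begin><end>.jpg."""
    urls = []
    for i in range(periods):
        begin = start + datetime.timedelta(days=5 * i)
        end = begin + datetime.timedelta(days=9)  # assumed window length
        product_id = begin.strftime("%Y%m%d") + end.strftime("%Y%m%d")
        urls.append("http://earthexplorer.usgs.gov/browse/emodis/AF/%d/EMAFT%s.jpg"
                    % (begin.year, product_id))
    return urls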
|
|
# orm/identity.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from .. import util
class IdentityMap(object):
def __init__(self):
self._dict = {}
self._modified = set()
self._wr = weakref.ref(self)
def keys(self):
return self._dict.keys()
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def _add_unpresent(self, state, key):
"""optional inlined form of add() which can assume item isn't present
in the map"""
self.add(state)
def update(self, dict):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
if state.modified:
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __len__(self):
return len(self._dict)
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __getitem__(self, key):
state = self._dict[key]
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if key in self._dict:
state = self._dict[key]
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
return state.key in self._dict and self._dict[state.key] is state
def replace(self, state):
if state.key in self._dict:
existing = self._dict[state.key]
if existing is not state:
self._manage_removed_state(existing)
else:
return
self._dict[state.key] = state
self._manage_incoming_state(state)
def add(self, state):
key = state.key
# inline of self.__contains__
if key in self._dict:
try:
existing_state = self._dict[key]
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise AssertionError(
"A conflicting state is already "
"present in the identity map for key %r"
% (key, ))
else:
return
except KeyError:
pass
self._dict[key] = state
self._manage_incoming_state(state)
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state
state._instance_dict = self._wr
def get(self, key, default=None):
if key not in self._dict:
return default
state = self._dict[key]
o = state.obj()
if o is None:
return default
return o
def items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
def __iter__(self):
return iter(self.keys())
if util.py2k:
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
def all_states(self):
if util.py2k:
return self._dict.values()
else:
return list(self._dict.values())
def _fast_discard(self, state):
self._dict.pop(state.key, None)
def discard(self, state):
st = self._dict.pop(state.key, None)
if st:
assert st is state
self._manage_removed_state(state)
def safe_discard(self, state):
if state.key in self._dict:
st = self._dict[state.key]
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
class StrongInstanceDict(IdentityMap):
"""A 'strong-referencing' version of the identity map.
.. deprecated:: this object is present in order to fulfill
the ``weak_identity_map=False`` option of the Session.
This option is present to allow compatibility with older applications,
but it is recommended that strong references to objects
be maintained by the calling application
externally to the :class:`.Session` itself, to the degree
that is needed by the application.
"""
if util.py2k:
def itervalues(self):
return self._dict.itervalues()
def iteritems(self):
return self._dict.iteritems()
def __iter__(self):
        return iter(self._dict)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def get(self, key, default=None):
return self._dict.get(key, default)
def values(self):
return self._dict.values()
def items(self):
return self._dict.items()
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self and
attributes.instance_state(self[state.key]) is state)
def replace(self, state):
if state.key in self._dict:
existing = self._dict[state.key]
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
def add(self, state):
if state.key in self:
if attributes.instance_state(self._dict[state.key]) is not state:
raise AssertionError('A conflicting state is already '
'present in the identity map for key %r'
% (state.key, ))
else:
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state.obj()
state._instance_dict = self._wr
def _fast_discard(self, state):
self._dict.pop(state.key, None)
def discard(self, state):
obj = self._dict.pop(state.key, None)
if obj is not None:
self._manage_removed_state(state)
st = attributes.instance_state(obj)
assert st is state
def safe_discard(self, state):
if state.key in self._dict:
obj = self._dict[state.key]
st = attributes.instance_state(obj)
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
self._dict.clear()
self._dict.update(keepers)
self.modified = bool(dirty)
return ref_count - len(self)
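# A minimal illustration of WeakInstanceDict semantics using a stand-in state
# object. Real states come from sqlalchemy.orm and are managed by the Session;
# this hypothetical helper is never called and only sketches the
# weak-referencing behaviour.
def _example_weak_map_behaviour():
    class _FakeState(object):
        """Stand-in exposing only the attributes this map touches."""
        def __init__(self, key, obj):
            self.key = key
            self.modified = False
            self._ref = weakref.ref(obj)

        def obj(self):
            return self._ref()

    class _Thing(object):
        pass

    thing = _Thing()
    state = _FakeState(('user', (1,)), thing)
    idmap = WeakInstanceDict()
    idmap.add(state)
    assert idmap.get(('user', (1,))) is thing
    del thing  # under CPython refcounting the entry now dereferences to None
    assert idmap.get(('user', (1,)), 'gone') == 'gone'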
|
|
import cgi
import os
from flask import Flask, render_template, abort, url_for, request, flash, session, redirect,Blueprint
from flaskext.markdown import Markdown
from mdx_github_gists import GitHubGistExtension
from individual import individual,blog
from mdx_strike import StrikeExtension
from mdx_quote import QuoteExtension
from blog_pages import register_pages
from mdx_code_multiline import MultilineCodeExtension
from werkzeug.contrib.atom import AtomFeed
import post
import user
import pagination
import settings
from helper_functions import *
from analytics import analytics
#app = Blueprint('FlaskBlog',__name__,template_folder='templates')
app=Flask(__name__)
#app.register_blueprint(individual)
#register_pages(app)
app.register_blueprint(analytics)
register_pages(app)
app.register_blueprint(blog, url_prefix='/blog')
app.register_blueprint(individual)
md = Markdown(app)
md.register_extension(GitHubGistExtension)
md.register_extension(StrikeExtension)
md.register_extension(QuoteExtension)
md.register_extension(MultilineCodeExtension)
app.config.from_object('config')
def template_settings():
"""Extra template globals/context"""
return dict(
CONTACT_EMAIL='[email protected]',
BASE_URL='http://jatin.kr')
app.context_processor(template_settings)
@app.route('/', defaults={'page': 1})
@app.route('/page-<int:page>')
def index(page):
skip = (page - 1) * int(app.config['PER_PAGE'])
posts = postClass.get_posts(int(app.config['PER_PAGE']), skip)
count = postClass.get_total_count()
pag = pagination.Pagination(page, app.config['PER_PAGE'], count)
return render_template('index.html', posts=posts['data'], pagination=pag, meta_title=app.config['BLOG_TITLE'])
@app.route('/tag/<tag>', defaults={'page': 1})
@app.route('/tag/<tag>/page-<int:page>')
def posts_by_tag(tag, page):
skip = (page - 1) * int(app.config['PER_PAGE'])
posts = postClass.get_posts(int(app.config['PER_PAGE']), skip, tag=tag)
count = postClass.get_total_count(tag=tag)
if not posts['data']:
abort(404)
pag = pagination.Pagination(page, app.config['PER_PAGE'], count)
return render_template('index.html', posts=posts['data'], pagination=pag, meta_title='Posts by tag: ' + tag)
@app.route('/post/<permalink>')
def single_post(permalink):
post = postClass.get_post_by_permalink(permalink)
if not post['data']:
abort(404)
return render_template('single_post.html', post=post['data'], meta_title=app.config['BLOG_TITLE'] + '::' + post['data']['title'])
@app.route('/q/<query>', defaults={'page': 1})
@app.route('/q/<query>/page-<int:page>')
def search_results(page, query):
skip = (page - 1) * int(app.config['PER_PAGE'])
if query:
posts = postClass.get_posts(
int(app.config['PER_PAGE']), skip, search=query)
else:
        posts = {'data': []}
count = postClass.get_total_count(search=query)
pag = pagination.Pagination(page, app.config['PER_PAGE'], count)
return render_template('index.html', posts=posts['data'], pagination=pag, meta_title='Search results')
@app.route('/search', methods=['GET', 'POST'])
def search():
if request.method != 'POST':
return redirect(url_for('index'))
query = request.form.get('query', None)
if query:
return redirect(url_for('search_results', query=query))
else:
return redirect(url_for('index'))
@app.route('/newpost', methods=['GET', 'POST'])
@login_required()
def new_post():
error = False
error_type = 'validate'
if request.method == 'POST':
post_title = request.form.get('post-title').strip()
post_full = request.form.get('post-full')
if not post_title or not post_full:
error = True
else:
tags = cgi.escape(request.form.get('post-tags'))
tags_array = extract_tags(tags)
post_data = {'title': post_title,
'preview': request.form.get('post-short'),
'body': post_full,
'tags': tags_array,
'author': session['user']['username']}
post = postClass.validate_post_data(post_data)
if request.form.get('post-preview') == '1':
session['post-preview'] = post
session[
'post-preview']['action'] = 'edit' if request.form.get('post-id') else 'add'
if request.form.get('post-id'):
session[
'post-preview']['redirect'] = url_for('post_edit', id=request.form.get('post-id'))
else:
session['post-preview']['redirect'] = url_for('new_post')
return redirect(url_for('post_preview'))
else:
session.pop('post-preview', None)
if request.form.get('post-id'):
response = postClass.edit_post(
request.form['post-id'], post)
if not response['error']:
flash('Post updated!', 'success')
else:
flash(response['error'], 'error')
return redirect(url_for('posts'))
else:
response = postClass.create_new_post(post)
if response['error']:
error = True
error_type = 'post'
flash(response['error'], 'error')
else:
flash('New post created!', 'success')
else:
if session.get('post-preview') and session['post-preview']['action'] == 'edit':
session.pop('post-preview', None)
return render_template('new_post.html',
meta_title='New post',
error=error,
error_type=error_type)
@app.route('/post_preview')
@login_required()
def post_preview():
post = session.get('post-preview')
return render_template('preview.html', post=post, meta_title='Preview post::' + post['title'])
@app.route('/posts_list', defaults={'page': 1})
@app.route('/posts_list/page-<int:page>')
@login_required()
def posts(page):
session.pop('post-preview', None)
skip = (page - 1) * int(app.config['PER_PAGE'])
posts = postClass.get_posts(int(app.config['PER_PAGE']), skip)
count = postClass.get_total_count()
pag = pagination.Pagination(page, app.config['PER_PAGE'], count)
if not posts['data']:
abort(404)
return render_template('posts.html', posts=posts['data'], pagination=pag, meta_title='Posts')
@app.route('/post_edit?id=<id>')
@login_required()
def post_edit(id):
post = postClass.get_post_by_id(id)
if post['error']:
flash(post['error'], 'error')
return redirect(url_for('posts'))
if session.get('post-preview') and session['post-preview']['action'] == 'add':
session.pop('post-preview', None)
return render_template('edit_post.html',
meta_title='Edit post::' + post['data']['title'],
post=post['data'],
error=False,
error_type=False)
@app.route('/post_delete?id=<id>')
@login_required()
def post_del(id):
if postClass.get_total_count() > 1:
response = postClass.delete_post(id)
if response['data'] is True:
flash('Post removed!', 'success')
else:
flash(response['error'], 'error')
else:
        flash('There needs to be at least one post.', 'error')
return redirect(url_for('posts'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = False
error_type = 'validate'
if request.method == 'POST':
username = request.form.get('login-username')
password = request.form.get('login-password')
if not username or not password:
error = True
else:
user_data = userClass.login(username.lower().strip(), password)
if user_data['error']:
error = True
error_type = 'login'
flash(user_data['error'], 'error')
else:
userClass.start_session(user_data['data'])
flash('You are logged in!', 'success')
return redirect(url_for('posts'))
else:
if session.get('user'):
return redirect(url_for('posts'))
return render_template('login.html',
meta_title='Login',
error=error,
error_type=error_type)
@app.route('/logout')
def logout():
if userClass.logout():
flash('You are logged out!', 'success')
return redirect(url_for('login'))
@app.route('/users')
@login_required()
def users_list():
users = userClass.get_users()
return render_template('users.html', users=users['data'], meta_title='Users')
@app.route('/add_user')
@login_required()
def add_user():
gravatar_url = userClass.get_gravatar_link()
return render_template('add_user.html', gravatar_url=gravatar_url, meta_title='Add user')
@app.route('/edit_user?id=<id>')
@login_required()
def edit_user(id):
user = userClass.get_user(id)
return render_template('edit_user.html', user=user['data'], meta_title='Edit user')
@app.route('/delete_user?id=<id>')
@login_required()
def delete_user(id):
if id != session['user']['username']:
user = userClass.delete_user(id)
if user['error']:
flash(user['error'], 'error')
else:
flash('User deleted!', 'success')
return redirect(url_for('users_list'))
@app.route('/save_user', methods=['POST'])
@login_required()
def save_user():
post_data = {
'_id': request.form.get('user-id', None).lower().strip(),
'email': request.form.get('user-email', None),
'old_pass': request.form.get('user-old-password', None),
'new_pass': request.form.get('user-new-password', None),
'new_pass_again': request.form.get('user-new-password-again', None),
'update': request.form.get('user-update', False)
}
if not post_data['email'] or not post_data['_id']:
flash('Username and Email are required..', 'error')
if post_data['update']:
return redirect(url_for('edit_user', id=post_data['_id']))
else:
return redirect(url_for('add_user'))
else:
user = userClass.save_user(post_data)
if user['error']:
flash(user['error'], 'error')
if post_data['update']:
return redirect(url_for('edit_user', id=post_data['_id']))
else:
return redirect(url_for('add_user'))
else:
message = 'User updated!' if post_data['update'] else 'User added!'
flash(message, 'success')
return redirect(url_for('edit_user', id=post_data['_id']))
@app.route('/recent_feed')
def recent_feed():
feed = AtomFeed(app.config['BLOG_TITLE'] + '::Recent Articles',
feed_url=request.url, url=request.url_root)
posts = postClass.get_posts(int(app.config['PER_PAGE']), 0)
for post in posts['data']:
post_entry = post['preview'] if post['preview'] else post['body']
feed.add(post['title'], md(post_entry),
content_type='html',
author=post['author'],
url=make_external(
url_for('single_post', permalink=post['permalink'])),
updated=post['date'])
return feed.get_response()
@app.route('/settings', methods=['GET', 'POST'])
@login_required()
def blog_settings():
error = None
error_type = 'validate'
if request.method == 'POST':
blog_data = {
'title': request.form.get('blog-title', None),
'description': request.form.get('blog-description', None),
'per_page': request.form.get('blog-perpage', None),
'text_search': request.form.get('blog-text-search', None)
}
blog_data['text_search'] = 1 if blog_data['text_search'] else 0
for key, value in blog_data.items():
if not value and key != 'text_search' and key != 'description':
error = True
break
if not error:
update_result = settingsClass.update_settings(blog_data)
if update_result['error']:
flash(update_result['error'], 'error')
else:
flash('Settings updated!', 'success')
return redirect(url_for('blog_settings'))
return render_template('settings.html',
default_settings=app.config,
meta_title='Settings',
error=error,
error_type=error_type)
@app.route('/install', methods=['GET', 'POST'])
def install():
if session.get('installed', None):
return redirect(url_for('index'))
error = False
error_type = 'validate'
print 'Request method is %s'%(request.method)
if request.method == 'POST':
user_error = False
blog_error = False
user_data = {
'_id': request.form.get('user-id', None).lower().strip(),
'email': request.form.get('user-email', None),
'new_pass': request.form.get('user-new-password', None),
'new_pass_again': request.form.get('user-new-password-again', None),
'update': False
}
blog_data = {
'title': request.form.get('blog-title', None),
'description': request.form.get('blog-description', None),
'per_page': request.form.get('blog-perpage', None),
'text_search': request.form.get('blog-text-search', None)
}
blog_data['text_search'] = 1 if blog_data['text_search'] else 0
for key, value in user_data.items():
if not value and key != 'update':
user_error = True
break
for key, value in blog_data.items():
if not value and key != 'text_search' and key != 'description':
blog_error = True
break
if user_error or blog_error:
error = True
else:
install_result = settingsClass.install(blog_data, user_data)
if install_result['error']:
for i in install_result['error']:
if i is not None:
flash(i, 'error')
else:
session['installed'] = True
flash('Successfully installed!', 'success')
user_login = userClass.login(
user_data['_id'], user_data['new_pass'])
if user_login['error']:
flash(user_login['error'], 'error')
else:
userClass.start_session(user_login['data'])
flash('You are logged in!', 'success')
return redirect(url_for('posts'))
else:
if settingsClass.is_installed():
return redirect(url_for('index'))
return render_template('install.html',
default_settings=app.config,
error=error,
error_type=error_type,
meta_title='Install')
@app.before_request
def csrf_protect():
if request.method == "POST":
token = session.pop('_csrf_token', None)
if not token or token != request.form.get('_csrf_token'):
abort(400)
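# The check above pairs with a ``generate_csrf_token`` helper imported from
# helper_functions (not shown in this file). A minimal sketch of what such a
# helper typically looks like, assuming the token is cached in the session;
# the name below is deliberately different so it does not shadow the real
# import, and it is never called here.
def _generate_csrf_token_sketch():
    import binascii
    if '_csrf_token' not in session:
        session['_csrf_token'] = binascii.hexlify(os.urandom(16))
    return session['_csrf_token']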
@app.before_request
def is_installed():
app.config = settingsClass.get_config()
app.jinja_env.globals['meta_description'] = app.config['BLOG_DESCRIPTION']
if not session.get('installed', None):
if url_for('static', filename='') not in request.path and request.path != url_for('install'):
if not settingsClass.is_installed():
return redirect(url_for('install'))
@app.before_request
def set_globals():
app.jinja_env.globals['csrf_token'] = generate_csrf_token
app.jinja_env.globals['recent_posts'] = postClass.get_posts(10, 0)['data']
app.jinja_env.globals['tags'] = postClass.get_tags()['data']
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html', meta_title='404'), 404
@app.template_filter('formatdate')
def format_datetime_filter(input_value, format_="%a, %d %b %Y"):
return input_value.strftime(format_)
settingsClass = settings.Settings(app.config)
postClass = post.Post(app.config)
userClass = user.User(app.config)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
app.jinja_env.globals['meta_description'] = app.config['BLOG_DESCRIPTION']
if not app.config['DEBUG']:
import logging
from logging import FileHandler
file_handler = FileHandler(app.config['LOG_FILE'])
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
if __name__ == '__main__':
app.config['SERVER_NAME']="127.0.0.1"
app.run(host=app.config['SERVER_NAME'], port=int(os.environ.get("PORT", 5000)),
debug=app.config['DEBUG'])
|
|
###############################################################################
# Copyright 2006 to the present, Orbitz Worldwide, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from kitt.interfaces import moduleProvides, IDroneDService
moduleProvides(IDroneDService) #requirement
from kitt.util import dictwrapper
import config
SERVICENAME = 'janitizer'
#default configuration cleanup logs older than 7 days
SERVICECONFIG = dictwrapper({
'JANITIZE': {
config.LOG_DIR: [
('.*.log.\d+.*', int(7*len(config.AUTOSTART_SERVICES))),
],
}
})
import os, re, time
from twisted.application.service import Service
from twisted.internet import defer, task
from droned.logging import logWithContext
from kitt.decorators import synchronizedDeferred, deferredAsThread
import copy
__doc__ = """
config [JANITOR_DICT, AUTOSTART_SERVICES]
This service when properly configured will keep the filesystem
cleaned up when running.
keep the most recent 10 copies of files that match the pattern
# files that don't match the pattern are ignored.
Janitizer.garbage = {
'/tmp/example1/log/directory' : [
( 'foo_[a-z].+\.log.*', 10)
]
}
"""
#logging context
log = logWithContext(type=SERVICENAME)
def ageCompare(f1,f2):
t1 = os.path.getmtime(f1)
t2 = os.path.getmtime(f2)
if t1 > t2: return 1
    if t1 == t2: return 0
    if t1 < t2: return -1
class Janitizer(Service):
minute = property(lambda foo: 60)
hour = property(lambda foo: 3600)
day = property(lambda foo: 86400)
week = property(lambda f: 604800)
oldfiles = {}
#get the watch dictionary from romeo
watchDict = property(lambda s: SERVICECONFIG.wrapped.get('JANITIZE',{}))
    #lock acquired before starting a thread that modifies class state
busy = defer.DeferredLock()
def update(self, watchDict):
"""Inspects occurrence for a watchDict parameter and updates
the internal state of Janitizer
@param watchDict (dict)
return None
"""
#break references
tmp = copy.deepcopy(self.watchDict)
tmp.update(watchDict) #apply updates
SERVICECONFIG.JANITIZE = tmp
#this would have blocked the reactor w/o the thread
@synchronizedDeferred(busy)
@deferredAsThread
def garbageCheck(self):
"""Check for file patterns that are removeable"""
watchDict = copy.deepcopy(self.watchDict) #use locals for safety
for directory,garbageList in watchDict.iteritems():
if not os.path.exists(directory): continue
for pattern,limit in garbageList:
#blocking method in a thread
self.cleanupLinks(directory)
files = [os.path.join(directory,f) for f in os.listdir(directory) \
if re.search(pattern,f)]
files = sorted(files)
if len(files) > int(limit):
log('These files matched:\n\t%s' % '\n\t'.join(files))
while len(files) > int(limit):
oldfile = files.pop(0)
log('Deleting %s' % oldfile)
if os.path.islink(oldfile): continue
if os.path.isdir(oldfile):
for base, dirs, myfiles in os.walk(oldfile, topdown=False):
for name in myfiles:
os.remove(os.path.join(base, name))
for name in dirs:
os.rmdir(os.path.join(base, name))
os.rmdir(oldfile)
else: os.unlink(oldfile)
#blocking method in a thread
self.cleanupLinks(directory)
#this will block the reactor
def cleanupLinks(self, directory):
"""cleans broken symlinks
@param directory: (string)
return list
"""
files = [os.path.join(directory,f) for f in os.listdir(directory)]
for f in files[:]:
if not os.path.exists(f):
log('Removing broken symlink %s' % f)
os.unlink(f)
files.remove(f)
return files
def clean_old_files(self, directory, age, recurse=True):
"""mark this directory for cleaning at a certain age
@param directory: (string)
@param age: (float)
@param recurse: (bool)
return None
"""
self.oldfiles[directory] = (age,recurse)
#this would have blocked the reactor w/o the thread
@synchronizedDeferred(busy)
@deferredAsThread
def clean_elderly(self):
"""clean old files in a thread"""
for directory in self.oldfiles:
self.recursive_clean(directory,*self.oldfiles[directory])
#this will block the reactor
def recursive_clean(self, directory, age, recurse):
"""recusively clean a directory
@param directory: (string)
@param age: (float)
@param recurse: (bool)
return bool
"""
try: data = map(lambda n: os.path.join(directory,n), os.listdir(directory))
except:
log('could not find directory %s' % directory)
return
for node in data:
if os.path.isdir(node) and recurse:
#blocking method in a thread
empty = self.recursive_clean(node,age,recurse)
if empty:
try: os.rmdir(node)
except: log('could not remove directory: %s' % node)
continue
if os.path.isdir(node): continue #in case recurse is False
if (time.time() - os.stat(node).st_mtime) > age:
try: os.remove(node)
except: log('could not remove file: %s' % node)
return bool(os.listdir(directory))
def startService(self):
"""Start Janitizer Service"""
self.GARBAGE_CHECK = task.LoopingCall(self.garbageCheck)
self.ELDERLY_CHECK = task.LoopingCall(self.clean_elderly)
#start the service
Service.startService(self)
self.GARBAGE_CHECK.start(self.minute * 20)
self.ELDERLY_CHECK.start(self.minute)
def stopService(self):
"""Stop All Janitizer Service"""
try:
if self.GARBAGE_CHECK.running:
self.GARBAGE_CHECK.stop()
if self.ELDERLY_CHECK.running:
self.ELDERLY_CHECK.stop()
except: pass
Service.stopService(self)
# module state globals
parentService = None
service = None
#exported service api
def update(watchDict):
global service
if not running():
raise AssertionError('janitizer service is not running')
return service.update(watchDict)
###############################################################################
# API Requirements
###############################################################################
def install(_parentService):
global parentService
parentService = _parentService
def start():
global service
if not running():
service = Janitizer()
service.setName(SERVICENAME)
service.setServiceParent(parentService)
def stop():
global service
if running():
service.disownServiceParent()
service.stopService()
service = None
def running():
return bool(service) and service.running
__all__ = ['install', 'start', 'stop', 'running']
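# Hedged usage sketch: under droned the framework normally drives install() and
# start() itself; this helper is not called anywhere and only illustrates the
# exported API of this module, assuming ``parent`` is an existing
# twisted.application.service.MultiService and the path/pattern are made up.
def example_configuration(parent):
    """Wire this module into a parent service and register a cleanup pattern."""
    install(parent)
    start()
    if running():
        # keep at most 14 rotated logs matching the (hypothetical) pattern
        update({'/var/log/myapp': [(r'.*\.log\.\d+.*', 14)]})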
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'container_id': 'str',
'image': 'str',
'image_id': 'str',
'last_state': 'V1ContainerState',
'name': 'str',
'ready': 'bool',
'restart_count': 'int',
'state': 'V1ContainerState'
}
attribute_map = {
'container_id': 'containerID',
'image': 'image',
'image_id': 'imageID',
'last_state': 'lastState',
'name': 'name',
'ready': 'ready',
'restart_count': 'restartCount',
'state': 'state'
}
def __init__(self, container_id=None, image=None, image_id=None, last_state=None, name=None, ready=None, restart_count=None, state=None):
"""
V1ContainerStatus - a model defined in Swagger
"""
self._container_id = None
self._image = None
self._image_id = None
self._last_state = None
self._name = None
self._ready = None
self._restart_count = None
self._state = None
self.discriminator = None
if container_id is not None:
self.container_id = container_id
self.image = image
self.image_id = image_id
if last_state is not None:
self.last_state = last_state
self.name = name
self.ready = ready
self.restart_count = restart_count
if state is not None:
self.state = state
@property
def container_id(self):
"""
Gets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:return: The container_id of this V1ContainerStatus.
:rtype: str
"""
return self._container_id
@container_id.setter
def container_id(self, container_id):
"""
Sets the container_id of this V1ContainerStatus.
Container's ID in the format 'docker://<container_id>'.
:param container_id: The container_id of this V1ContainerStatus.
:type: str
"""
self._container_id = container_id
@property
def image(self):
"""
Gets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:return: The image of this V1ContainerStatus.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""
Sets the image of this V1ContainerStatus.
The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images
:param image: The image of this V1ContainerStatus.
:type: str
"""
if image is None:
raise ValueError("Invalid value for `image`, must not be `None`")
self._image = image
@property
def image_id(self):
"""
Gets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:return: The image_id of this V1ContainerStatus.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""
Sets the image_id of this V1ContainerStatus.
ImageID of the container's image.
:param image_id: The image_id of this V1ContainerStatus.
:type: str
"""
if image_id is None:
raise ValueError("Invalid value for `image_id`, must not be `None`")
self._image_id = image_id
@property
def last_state(self):
"""
Gets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:return: The last_state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._last_state
@last_state.setter
def last_state(self, last_state):
"""
Sets the last_state of this V1ContainerStatus.
Details about the container's last termination condition.
:param last_state: The last_state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._last_state = last_state
@property
def name(self):
"""
Gets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:return: The name of this V1ContainerStatus.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ContainerStatus.
This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.
:param name: The name of this V1ContainerStatus.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def ready(self):
"""
Gets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:return: The ready of this V1ContainerStatus.
:rtype: bool
"""
return self._ready
@ready.setter
def ready(self, ready):
"""
Sets the ready of this V1ContainerStatus.
Specifies whether the container has passed its readiness probe.
:param ready: The ready of this V1ContainerStatus.
:type: bool
"""
if ready is None:
raise ValueError("Invalid value for `ready`, must not be `None`")
self._ready = ready
@property
def restart_count(self):
"""
Gets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:return: The restart_count of this V1ContainerStatus.
:rtype: int
"""
return self._restart_count
@restart_count.setter
def restart_count(self, restart_count):
"""
Sets the restart_count of this V1ContainerStatus.
The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.
:param restart_count: The restart_count of this V1ContainerStatus.
:type: int
"""
if restart_count is None:
raise ValueError("Invalid value for `restart_count`, must not be `None`")
self._restart_count = restart_count
@property
def state(self):
"""
Gets the state of this V1ContainerStatus.
Details about the container's current condition.
:return: The state of this V1ContainerStatus.
:rtype: V1ContainerState
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this V1ContainerStatus.
Details about the container's current condition.
:param state: The state of this V1ContainerStatus.
:type: V1ContainerState
"""
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
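# Illustrative usage sketch (hedged; not part of the generated client code).
# The setters above reject None for `name`, `ready` and `restart_count`, while
# `state`/`last_state` accept any V1ContainerState or None. Assuming the
# generated __init__ (defined elsewhere) accepts keyword arguments per
# attribute, a typical round-trip looks like:
#
#   status = V1ContainerStatus(name='web', ready=True, restart_count=0)
#   status.restart_count = 2       # validated by the setter above
#   as_dict = status.to_dict()     # plain dict keyed by swagger_types
#   print(status)                  # __repr__ delegates to pformat(to_dict())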
|
|
import Tkinter as Tk
import tkFileDialog
import tkMessageBox
import os
from sets import Set
import itertools
import json
import run_smia_cukie
import sys
import tkFont
def runanalysis(config_path):
try:
# TODO: add logging and other command line flags here.
namespace = run_smia_cukie.get_args_namespace([config_path])
run_smia_cukie.run_smia_cukie(namespace)
Tk.Tk().withdraw() # get rid of top level window
tkMessageBox.showinfo("Success!,", "Success!!!\n")
except:
Tk.Tk().withdraw() # get rid of top level window
tkMessageBox.showerror("ERROR!", "An error occurred! \nSee terminal output")
print sys.exc_info()
sys.exit(1)
def isuseless(name_list):
for name in name_list:
if "NOT" + name in name_list:
return True
return False
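# Illustrative note: a combination is "useless" when it contains both a name and
# its NOT-form. For a hypothetical mask named "DAPI", isuseless(["DAPI", "NOTDAPI"])
# is True, while isuseless(["DAPI", "NOTCD31"]) is False.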
def createdict():
global basedir, output_dir,nummasks,nummarkers,mask_list,marker_list,output_images,output_thumbnails,white_list
config = {}
config['base_directory'] = basedir
config['num_pictures'] = nummasks+nummarkers
config['num_masks'] = nummasks
config['mask_names'] = mask_list
config['num_markers'] = nummarkers
config['marker_names'] = marker_list
config['overlay_white_list'] = white_list
config['output_images'] = output_images
config['output_thumbnails'] = output_thumbnails
config['output_to'] = output_dir
return config
def createalloverlaynames():
global mask_list
mask_names_ = [x[1] for x in mask_list] #grab all mask names
marker_names_ = [x[1] for x in marker_list] # grab all marker names
mask_names = []
marker_names = []
for name in mask_names_:
mask_names.append(name)
mask_names.append("NOT"+name)
for name in marker_names_:
marker_names.append(name)
marker_names.append("NOT"+name)
all_together = mask_names + marker_names
all_together = Set(all_together)
combinations = []
marker_combos = []
for i in xrange(1,len(all_together)+1):
for marker_combination in itertools.combinations(all_together,i):
if not isuseless(marker_combination):
marker_combos.append(marker_combination)
    # pair every marker (and its NOT-form) with every allowed combination
for combo in itertools.product(marker_names,marker_combos):
marker = combo[0]
mask = combo[1]
overlay = marker + " under " + ", ".join(mask)
combinations.append(overlay)
# generate all combinations
return combinations
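# Illustrative note (hypothetical names): with one mask "DAPI" and one marker
# "CD31", all_together becomes {"DAPI", "NOTDAPI", "CD31", "NOTCD31"}; combinations
# containing both a name and its NOT-form are filtered out by isuseless(), and the
# returned overlay strings look like "CD31 under DAPI" or "NOTCD31 under DAPI"
# (the order of names inside multi-name combinations follows set iteration order).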
def GetPicList(basedir):
"""
base_dir
-> batch1
-> we
-> want
-> these
-> images
-> batch2
"""
filename = ''
for name in os.listdir(basedir):
if not name.startswith('.'):
filename = name
break
if not filename:
raise ValueError("Couldn't find any non-hidden directories in basedir")
pic_list = os.listdir(os.path.join(basedir,filename))
    # strip the '.tif' extension from each picture name before returning
    pic_list = [pic.replace('.tif', '') for pic in pic_list]
return pic_list
def firstthings():
global basedir,nummasks,nummarkers, output_images, output_thumbnails, output_dir
basedir = basedir_entry.get()
output_dir = outdir_entry.get()
nummasks = int(nummasks_entry.get())
nummarkers = int(nummarkers_entry.get())
output_images = True if output_images_var.get() else False
output_thumbnails = True if output_thumbnails_var.get() else False
first_options.destroy()
def maskthings():
global mask_list, dir_list
for prefix,name,threshold in mask_objects:
prefix = prefix.get()
name = name.get()
thresh = threshold.get()
        # drop the chosen prefix from dir_list so it is not offered again
        if prefix in dir_list:
            dir_list.remove(prefix)
mask_list.append((prefix,name,thresh))
masks.destroy()
def markerthings():
global marker_list, dir_list
for prefix,name,threshold in marker_objects:
prefix = prefix.get()
name = name.get()
thresh = threshold.get()
        # this way we avoid repetition errors: drop the chosen prefix from dir_list
        if prefix in dir_list:
            dir_list.remove(prefix)
marker_list.append((prefix,name,thresh))
markers.destroy()
def addtowhitelistfield():
global white_list
op = input_entry.get()
input_entry.delete(0,Tk.END)
new_label = Tk.Label(white,fg='blue',text=op)
new_label.grid(columnspan=2)
white_list.append(op)
def addtowhitelist():
# On Enter, grab the option, and add it to the label list
global white_list
# create a label, and add it to the grid
op = overlay_var.get()
new_label = Tk.Label(white,fg='blue',text=op)
new_label.grid(columnspan=2)
white_list.append(op)
def whitelistcontinue():
# we have our white list continue
white.destroy()
def setbase():
global basedir_entry
directory = tkFileDialog.askdirectory()
basedir_entry.delete(0,Tk.END)
basedir_entry.insert(0, directory)
def setresults():
global outdir_entry
directory = tkFileDialog.askdirectory()
outdir_entry.delete(0,Tk.END)
outdir_entry.insert(0, directory)
def setlocation():
global proloc_entry
directory = tkFileDialog.askdirectory()
proloc_entry.delete(0,Tk.END)
proloc_entry.insert(0, directory)
def makeconfigfile(output_dir,output_dict):
print output_dict
jayson = json.dumps(output_dict, indent=4, sort_keys=True)
config_loc = os.path.join(output_dir,"config_used.config")
with open(config_loc, 'w+') as config_file:
config_file.write(jayson)
return config_loc
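# Illustrative note: the config file written above is plain JSON produced from
# createdict(); with hypothetical inputs it would look roughly like
#
#   {
#       "base_directory": "/data/experiment1",
#       "num_pictures": 3,
#       "num_masks": 1,
#       "mask_names": [["dapi_", "DAPI", 30]],
#       "num_markers": 2,
#       "marker_names": [["cd31_", "CD31", 40], ["sma_", "SMA", 50]],
#       "overlay_white_list": ["CD31 under DAPI"],
#       "output_images": false,
#       "output_thumbnails": true,
#       "output_to": "/data/results"
#   }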
def setconfig():
directory = tkFileDialog.askopenfilename()
config_loc_entry.delete(0,Tk.END)
config_loc_entry.insert(0, directory)
def setnewres():
directory = tkFileDialog.askdirectory()
result_loc.delete(0,Tk.END)
result_loc.insert(0, directory)
def runfromconfig():
global from_config
# grab the new configuration file
config_file = config_loc_entry.get()
# update the results file
new_result_loc = result_loc.get()
# open the config file, load the json dict
# change the dict and write the file back.
to_change = {}
with open(config_file,'rb') as f:
to_change = json.load(f)
to_change['output_to'] = new_result_loc
jayson = json.dumps(to_change, indent=4, sort_keys=True)
newloc = os.path.join(new_result_loc,'used_config.config')
with open(newloc, 'wb') as f:
f.write(jayson)
from_config.destroy()
# now pass the new file into images.py main function
runanalysis(newloc)
sys.exit(0)
def makenew():
# if we get to here let's destroy the window and just move on
from_config.destroy()
if __name__ == '__main__':
############### From Config ##########################
from_config = Tk.Tk()
from_config.wm_title("SMIA-CUKIE")
customFont = tkFont.Font(root=from_config,family="Helvetica", size=80)
title_label = Tk.Label(from_config,text="SMIA-CUKIE", font = customFont, fg='blue')
title_explain = Tk.Label(from_config,text="Simultaneous Multi-Channel Immunofluorescence Analysis\n By Gil Cukierman",fg='blue')
instructions_label = Tk.Label(justify=Tk.LEFT,anchor=Tk.W, text = "\n\nIf you already have a configuration file, proceed below and press 'Run Analysis'\nOtherwise, press 'Make New' to create a configuration file\n\n")
config_loc_entry = Tk.Entry(from_config, width=50)
config_loc_entry.insert(0, "Configuration File Location")
browse = Tk.Button(from_config, text="Browse", command = setconfig)
# browse for new results location
result_loc = Tk.Entry(from_config, width=50)
result_loc.insert(0, "New Result Location")
browseres = Tk.Button(from_config, text="Browse", command = setnewres)
run = Tk.Button(from_config, text="Run Analysis", command=runfromconfig)
dontrun = Tk.Button(from_config, text="Make New", command=makenew)
title_label.grid(columnspan=2)
title_explain.grid(columnspan=2)
instructions_label.grid()
config_loc_entry.grid(row=4,column=0)
browse.grid(row=4,column=1)
result_loc.grid(row=5,column=0)
browseres.grid(row=5,column=1)
run.grid()
dontrun.grid()
from_config.mainloop()
################ First Options Window ############################
first_options = Tk.Tk()
first_options.wm_title("Initial Options")
### OUR EVENTUAL CONFIGURATION VARIABLES ###
basedir = 'default'
output_dir = 'default'
nummasks = 'default'
nummarkers = 'default'
mask_list = []
marker_list = []
white_list = []
output_images = False
output_thumbnails = False
customFont = tkFont.Font(root=first_options,family="Helvetica", size=80)
title_label = Tk.Label(first_options,text="SMIA-CUKIE", font = customFont, fg='blue')
title_explain = Tk.Label(first_options,text="Simultaneous Multi-Channel Immunofluorescence Analysis\n By Gil Cukierman",fg='blue')
    instructions_label = Tk.Label(first_options,justify=Tk.LEFT,anchor=Tk.W, text = "\n\nFill in the options below.\nUse Caution with outputting images and thumbnails (no more than 10 batches)!!\n")
basedir_entry = Tk.Entry(first_options, width=80)
basedir_entry.insert(0, "Base Directory")
browse1 = Tk.Button(first_options, text="Browse", command = setbase)
outdir_entry = Tk.Entry(first_options, width=80)
outdir_entry.insert(0, "Results Directory")
browse2 = Tk.Button(first_options, text="Browse", command = setresults)
nummasks_entry = Tk.Entry(first_options)
nummasks_entry.insert(0, "How many masks?")
nummarkers_entry = Tk.Entry(first_options)
nummarkers_entry.insert(0, "How many markers?")
output_images_var = Tk.IntVar()
output_images_button = Tk.Checkbutton(first_options, text="Output Full-Sized Images", variable=output_images_var)
output_thumbnails_var = Tk.IntVar()
output_thumbnails_button = Tk.Checkbutton(first_options, text="Output Thumbnail Images", variable=output_thumbnails_var)
pressme = Tk.Button(first_options, text="Continue", command = firstthings)
# proloc_entry.grid(row=1,column=0)
# browse3.grid(row=1,column=1)
title_label.grid(columnspan=2)
title_explain.grid(columnspan=2)
instructions_label.grid()
basedir_entry.grid(row=4,column=0)
browse1.grid(row=4,column=1)
outdir_entry.grid(row=5,column=0)
browse2.grid(row=5,column=1)
nummasks_entry.grid()
nummarkers_entry.grid()
output_images_button.grid()
output_thumbnails_button.grid()
pressme.grid()
first_options.mainloop()
################### Mask Options Window ###############################
masks = Tk.Tk()
masks.wm_title("Mask Options")
mask_objects = []
dir_list = GetPicList(basedir)
mask_label = Tk.Label(masks, text="Mask Options")
mask_label.grid()
for i in range(nummasks):
# prefix = Tk.Entry(masks)
# prefix.insert(0, "mask prefix")
prefix_var = Tk.StringVar(masks)
prefix_var.set("Choose Mask Prefix") # initial value
prefix = apply(Tk.OptionMenu, (masks,prefix_var) + tuple(dir_list))
common_sub = Tk.Entry(masks)
common_sub.insert(0,"common subsequence")
name = Tk.Entry(masks)
name.insert(0, "mask name")
threshold = Tk.Scale(masks, from_=0, to=255, orient=Tk.HORIZONTAL, label="threshold", length=255)
prefix.grid(row=i, column=0)
common_sub.grid(row=i,column=1)
name.grid(row=i, column=2)
threshold.grid(row=i, column=3)
mask_objects.append((common_sub,name,threshold))
pressme = Tk.Button(masks, text="Continue", command = maskthings)
pressme.grid()
masks.mainloop()
print "done masks..."
################ Marker Options Window ###############################################
markers = Tk.Tk()
markers.wm_title("Marker Options")
marker_objects = []
# dir_list = GetPicList(basedir)
marker_label = Tk.Label(markers, text="Marker Options")
marker_label.grid()
for i in range(nummarkers):
# prefix = Tk.Entry(masks)
# prefix.insert(0, "mask prefix")
prefix_var = Tk.StringVar(markers)
prefix_var.set("Choose Marker Prefix") # initial value
prefix = apply(Tk.OptionMenu, (markers,prefix_var) + tuple(dir_list))
common_sub = Tk.Entry(markers)
common_sub.insert(0,"common subsequence")
name = Tk.Entry(markers)
name.insert(0, "Marker name")
        threshold = Tk.Scale(markers, from_=0, to=255, orient=Tk.HORIZONTAL, label="threshold", length=255)
prefix.grid(row=i, column=0)
common_sub.grid(row=i, column=1)
name.grid(row=i, column=2)
threshold.grid(row=i, column=3)
marker_objects.append((common_sub,name,threshold))
pressme = Tk.Button(markers, text="Continue", command = markerthings)
pressme.grid()
    markers.mainloop()
print "done markers..."
############## White List ######################
white = Tk.Tk()
white.wm_title("Choose Overlays")
all_options = createalloverlaynames()
overlay_var = Tk.StringVar(white)
overlay_var.set("Choose Which Overlays to Perform") # initial value
overlay_options = apply(Tk.OptionMenu, (white,overlay_var) + tuple(all_options))
enter_button = Tk.Button(white, text="Enter", command = addtowhitelist)
input_entry = Tk.Entry(white,width=40)
enter_button2 = Tk.Button(white,text="Enter",command=addtowhitelistfield)
pressme = Tk.Button(white, text="Run Analysis", command = whitelistcontinue)
overlay_options.grid(row=0,column=0)
enter_button.grid(row=0,column=1)
input_entry.grid(row=1,column=0)
enter_button2.grid(row=1,column=1)
pressme.grid()
white.mainloop()
# create our configuration dict
config_dict = createdict()
# write it to a file in json format
config_path = makeconfigfile(output_dir, config_dict)
runanalysis(config_path)
|
|
import re
import urlparse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
# Avoid shadowing the login() view below.
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.views.decorators.csrf import csrf_protect
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.sites.models import get_current_site
from django.http import HttpResponseRedirect, Http404, QueryDict
from django.template import RequestContext
from django.utils.http import urlsafe_base64_decode
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.views.decorators.cache import never_cache
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""Displays the login form and handles the login action."""
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.method == "POST":
form = authentication_form(data=request.POST)
if form.is_valid():
netloc = urlparse.urlparse(redirect_to)[1]
# Light security check -- make sure redirect_to isn't garbage.
if not redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
# Heavier security check -- don't allow redirection to a different
# host.
elif netloc and netloc != request.get_host():
redirect_to = settings.LOGIN_REDIRECT_URL
# Okay, security checks complete. Log the user in.
auth_login(request, form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
request.session.set_test_cookie()
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
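# Illustrative note: the host check above means a POST carrying
# ?next=http://evil.example.com/ falls back to settings.LOGIN_REDIRECT_URL,
# while a relative or same-host path such as ?next=/dashboard/ is honoured.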
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
current_app=None, extra_context=None):
"Logs out the user and displays 'You are logged out' message."
from django.contrib.auth import logout
logout(request)
if next_page is None:
redirect_to = request.REQUEST.get(redirect_field_name, '')
if redirect_to:
return HttpResponseRedirect(redirect_to)
else:
current_site = get_current_site(request)
context = {
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out')
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
else:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page or request.path)
def logout_then_login(request, login_url=None, current_app=None, extra_context=None):
"Logs out the user if he is logged in. Then redirects to the log-in page."
if not login_url:
login_url = settings.LOGIN_URL
return logout(request, login_url, current_app=current_app, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"Redirects the user to the login page, passing the given 'next' page"
if not login_url:
login_url = settings.LOGIN_URL
login_url_parts = list(urlparse.urlparse(login_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlparse.urlunparse(login_url_parts))
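# Illustrative note: redirect_to_login() appends the "next" parameter to the
# login URL's query string. With settings.LOGIN_URL = '/accounts/login/'
# (a hypothetical value), redirect_to_login('/private/page/') redirects to
# '/accounts/login/?next=/private/page/'.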
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request, is_admin_site=False,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
current_app=None,
extra_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'request': request,
}
if is_admin_site:
opts = dict(opts, domain_override=request.META['HTTP_HOST'])
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_reset_done(request,
template_name='registration/password_reset_done.html',
current_app=None, extra_context=None):
context = {}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
# Doesn't need csrf_protect since no-one can guess the URL
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
current_app=None, extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
try:
uid = urlsafe_base64_decode(str(uidb64))
user = User.objects.get(id=uid)
except (TypeError, ValueError, User.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(None)
else:
validlink = False
form = None
context = {
'form': form,
'validlink': validlink,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
current_app=None, extra_context=None):
context = {
'login_url': settings.LOGIN_URL
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
@csrf_protect
@login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
def password_change_done(request,
template_name='registration/password_change_done.html',
current_app=None, extra_context=None):
context = {}
context.update(extra_context or {})
return render_to_response(template_name, context,
context_instance=RequestContext(request, current_app=current_app))
|
|
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the API for circuit transformers in Cirq."""
import dataclasses
import inspect
import enum
import functools
import textwrap
from typing import (
Any,
Tuple,
Hashable,
List,
overload,
Optional,
Type,
TYPE_CHECKING,
TypeVar,
)
from typing_extensions import Protocol
if TYPE_CHECKING:
import cirq
class LogLevel(enum.Enum):
"""Different logging resolution options for `cirq.TransformerLogger`.
The enum values of the logging levels are used to filter the stored logs when printing.
In general, a logging level `X` includes all logs stored at a level >= 'X'.
Args:
ALL: All levels. Used to filter logs when printing.
DEBUG: Designates fine-grained informational events that are most useful to debug /
understand in-depth any unexpected behavior of the transformer.
INFO: Designates informational messages that highlight the actions of a transformer.
WARNING: Designates unwanted or potentially harmful situations.
NONE: No levels. Used to filter logs when printing.
"""
ALL = 0
DEBUG = 10
INFO = 20
WARNING = 30
NONE = 40
@dataclasses.dataclass
class _LoggerNode:
"""Stores logging data of a single transformer stage in `cirq.TransformerLogger`.
The class is used to define a logging graph to store logs of sequential or nested transformers.
Each node corresponds to logs of a single transformer stage.
Args:
transformer_id: Integer specifying a unique id for corresponding transformer stage.
transformer_name: Name of the corresponding transformer stage.
initial_circuit: Initial circuit before the transformer stage began.
final_circuit: Final circuit after the transformer stage ended.
logs: Messages logged by the transformer stage.
nested_loggers: `transformer_id`s of nested transformer stages which were called by
the current stage.
"""
transformer_id: int
transformer_name: str
initial_circuit: 'cirq.AbstractCircuit'
final_circuit: 'cirq.AbstractCircuit'
logs: List[Tuple[LogLevel, Tuple[str, ...]]] = dataclasses.field(default_factory=list)
nested_loggers: List[int] = dataclasses.field(default_factory=list)
class TransformerLogger:
"""Base Class for transformer logging infrastructure. Defaults to text-based logging.
The logger implementation should be stateful, s.t.:
- Each call to `register_initial` registers a new transformer stage and initial circuit.
- Each subsequent call to `log` should store additional logs corresponding to the stage.
- Each call to `register_final` should register the end of the currently active stage.
The logger assumes that
- Transformers are run sequentially.
    - Nested transformers are allowed, in which case the behavior would be similar to
doing a depth first search on the graph of transformers -- i.e. the top level transformer
would end (i.e. receive a `register_final` call) once all nested transformers (i.e. all
`register_initial` calls received while the top level transformer was active) have
finished (i.e. corresponding `register_final` calls have also been received).
- This behavior can be simulated by maintaining a stack of currently active stages and
adding data from `log` calls to the stage at the top of the stack.
The `LogLevel`s can be used to control the input processing and output resolution of the logs.
"""
def __init__(self):
"""Initializes TransformerLogger."""
self._curr_id: int = 0
self._logs: List[_LoggerNode] = []
self._stack: List[int] = []
def register_initial(self, circuit: 'cirq.AbstractCircuit', transformer_name: str) -> None:
"""Register the beginning of a new transformer stage.
Args:
circuit: Input circuit to the new transformer stage.
transformer_name: Name of the new transformer stage.
"""
if self._stack:
self._logs[self._stack[-1]].nested_loggers.append(self._curr_id)
self._logs.append(_LoggerNode(self._curr_id, transformer_name, circuit, circuit))
self._stack.append(self._curr_id)
self._curr_id += 1
def log(self, *args: str, level: LogLevel = LogLevel.INFO) -> None:
"""Log additional metadata corresponding to the currently active transformer stage.
Args:
*args: The additional metadata to log.
level: Logging level to control the amount of metadata that gets put into the context.
Raises:
ValueError: If there's no active transformer on the stack.
"""
if len(self._stack) == 0:
raise ValueError('No active transformer found.')
self._logs[self._stack[-1]].logs.append((level, args))
def register_final(self, circuit: 'cirq.AbstractCircuit', transformer_name: str) -> None:
"""Register the end of the currently active transformer stage.
Args:
circuit: Final transformed output circuit from the transformer stage.
transformer_name: Name of the (currently active) transformer stage which ends.
Raises:
ValueError: If `transformer_name` is different from currently active transformer name.
"""
tid = self._stack.pop()
if self._logs[tid].transformer_name != transformer_name:
raise ValueError(
f"Expected `register_final` call for currently active transformer "
f"{self._logs[tid].transformer_name}."
)
self._logs[tid].final_circuit = circuit
def show(self, level: LogLevel = LogLevel.INFO) -> None:
"""Show the stored logs >= level in the desired format.
Args:
level: The logging level to filter the logs with. The method shows all logs with a
`LogLevel` >= `level`.
"""
def print_log(log: _LoggerNode, pad=''):
print(pad, f"Transformer-{1+log.transformer_id}: {log.transformer_name}", sep='')
print(pad, "Initial Circuit:", sep='')
print(textwrap.indent(str(log.initial_circuit), pad), "\n", sep='')
for log_level, log_text in log.logs:
if log_level.value >= level.value:
print(pad, log_level, *log_text)
print("\n", pad, "Final Circuit:", sep='')
print(textwrap.indent(str(log.final_circuit), pad))
print("----------------------------------------")
done = [0] * self._curr_id
for i in range(self._curr_id):
# Iterative DFS.
stack = [(i, '')] if not done[i] else []
while len(stack) > 0:
log_id, pad = stack.pop()
print_log(self._logs[log_id], pad)
done[log_id] = True
for child_id in self._logs[log_id].nested_loggers[::-1]:
stack.append((child_id, pad + ' ' * 4))
class NoOpTransformerLogger(TransformerLogger):
"""All calls to this logger are a no-op"""
def register_initial(self, circuit: 'cirq.AbstractCircuit', transformer_name: str) -> None:
pass
def log(self, *args: str, level: LogLevel = LogLevel.INFO) -> None:
pass
def register_final(self, circuit: 'cirq.AbstractCircuit', transformer_name: str) -> None:
pass
def show(self, level: LogLevel = LogLevel.INFO) -> None:
pass
@dataclasses.dataclass(frozen=True)
class TransformerContext:
"""Stores common configurable options for transformers.
Args:
logger: `cirq.TransformerLogger` instance, which is a stateful logger used for logging
the actions of individual transformer stages. The same logger instance should be
shared across different transformer calls.
tags_to_ignore: Tuple of tags which should be ignored while applying transformations on a
circuit. Transformers should not transform any operation marked with a tag that
belongs to this tuple. Note that any instance of a Hashable type (like `str`,
`cirq.VirtualTag` etc.) is a valid tag.
"""
logger: TransformerLogger = NoOpTransformerLogger()
tags_to_ignore: Tuple[Hashable, ...] = ()
class TRANSFORMER(Protocol):
def __call__(
self, circuit: 'cirq.AbstractCircuit', *, context: Optional[TransformerContext] = None
) -> 'cirq.AbstractCircuit':
...
_TRANSFORMER_T = TypeVar('_TRANSFORMER_T', bound=TRANSFORMER)
_TRANSFORMER_CLS_T = TypeVar('_TRANSFORMER_CLS_T', bound=Type[TRANSFORMER])
@overload
def transformer(cls_or_func: _TRANSFORMER_T) -> _TRANSFORMER_T:
pass
@overload
def transformer(cls_or_func: _TRANSFORMER_CLS_T) -> _TRANSFORMER_CLS_T:
pass
def transformer(cls_or_func: Any) -> Any:
"""Decorator to verify API and append logging functionality to transformer functions & classes.
A transformer is a callable that takes as inputs a `cirq.AbstractCircuit` and
`cirq.TransformerContext`, and returns another `cirq.AbstractCircuit` without
modifying the input circuit. A transformer could be a function, for example:
>>> @cirq.transformer
>>> def convert_to_cz(
>>> circuit: cirq.AbstractCircuit, *, context: Optional[cirq.TransformerContext] = None
>>> ) -> cirq.Circuit:
>>> ...
Or it could be a class that implements `__call__` with the same API, for example:
>>> @cirq.transformer
>>> class ConvertToSqrtISwaps:
>>> def __init__(self):
>>> ...
>>> def __call__(
>>> self,
>>> circuit: cirq.AbstractCircuit,
>>> *,
>>> context: Optional[cirq.TransformerContext] = None,
>>> ) -> cirq.Circuit:
>>> ...
Note that transformers which take additional parameters as `**kwargs`, with default values
specified for each keyword argument, are also supported. For example:
>>> @cirq.transformer
>>> def convert_to_sqrt_iswap(
>>> circuit: cirq.AbstractCircuit,
>>> *,
>>> context: Optional[cirq.TransformerContext] = None,
>>> atol: float = 1e-8,
>>> sqrt_iswap_gate: cirq.ISwapPowGate = cirq.SQRT_ISWAP_INV,
>>> cleanup_operations: bool = True,
>>> ) -> cirq.Circuit:
>>> pass
Args:
cls_or_func: The callable class or function to be decorated.
Returns:
Decorated class / function which includes additional logging boilerplate.
"""
if isinstance(cls_or_func, type):
cls = cls_or_func
method = cls.__call__
default_context = _get_default_context(method)
@functools.wraps(method)
def method_with_logging(
self, circuit: 'cirq.AbstractCircuit', **kwargs
) -> 'cirq.AbstractCircuit':
return _transform_and_log(
lambda circuit, **kwargs: method(self, circuit, **kwargs),
cls.__name__,
circuit,
kwargs.get('context', default_context),
**kwargs,
)
setattr(cls, '__call__', method_with_logging)
return cls
else:
assert callable(cls_or_func)
func = cls_or_func
default_context = _get_default_context(func)
@functools.wraps(func)
def func_with_logging(circuit: 'cirq.AbstractCircuit', **kwargs) -> 'cirq.AbstractCircuit':
return _transform_and_log(
func,
func.__name__,
circuit,
kwargs.get('context', default_context),
**kwargs,
)
return func_with_logging
def _get_default_context(func: TRANSFORMER):
sig = inspect.signature(func)
default_context = sig.parameters["context"].default
assert (
default_context != inspect.Parameter.empty
), "`context` argument must have a default value specified."
return default_context
def _transform_and_log(
func: TRANSFORMER,
transformer_name: str,
circuit: 'cirq.AbstractCircuit',
extracted_context: Optional[TransformerContext],
**kwargs,
) -> 'cirq.AbstractCircuit':
"""Helper to log initial and final circuits before and after calling the transformer."""
if extracted_context:
extracted_context.logger.register_initial(circuit, transformer_name)
transformed_circuit = func(circuit, **kwargs)
if extracted_context:
extracted_context.logger.register_final(transformed_circuit, transformer_name)
return transformed_circuit
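# Hedged usage sketch (illustrative; not part of the Cirq module above). A
# function decorated with `transformer` can be driven with an explicit context
# so that its input/output circuits are logged; `my_circuit` and the identity
# body below are placeholders.
#
#   @transformer
#   def noop_transformer(
#       circuit: 'cirq.AbstractCircuit', *, context: Optional[TransformerContext] = None
#   ) -> 'cirq.AbstractCircuit':
#       return circuit
#
#   ctx = TransformerContext(logger=TransformerLogger())
#   result = noop_transformer(my_circuit, context=ctx)
#   ctx.logger.show(LogLevel.DEBUG)   # prints the initial and final circuit per stage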
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake volume API."""
import uuid
from oslo_log import log as logging
from oslo_utils import timeutils
import nova.conf
from nova import exception
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class fake_volume(object):
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, size, name,
description, volume_id, snapshot,
volume_type, metadata,
availability_zone):
snapshot_id = None
if snapshot is not None:
snapshot_id = snapshot['id']
if volume_id is None:
volume_id = str(uuid.uuid4())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': volume_id,
'user_id': self.user_uuid,
'project_id': 'fake-project-id',
'snapshot_id': snapshot_id,
'host': None,
'size': size,
'availability_zone': availability_zone,
'instance_uuid': None,
'mountpoint': None,
'attach_time': timeutils.utcnow(),
'status': 'available',
'attach_status': 'detached',
'scheduled_at': None,
'launched_at': None,
'terminated_at': None,
'display_name': name,
'display_description': description,
'provider_location': 'fake-location',
'provider_auth': 'fake-auth',
'volume_type_id': 99,
'multiattach': False
}
def get(self, key, default=None):
return self.vol[key]
def __setitem__(self, key, value):
self.vol[key] = value
def __getitem__(self, key):
return self.vol[key]
class fake_snapshot(object):
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, volume_id, size, name, desc, id=None):
if id is None:
id = str(uuid.uuid4())
self.snap = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': str(id),
'volume_id': volume_id,
'status': 'available',
'progress': '100%',
'volume_size': 1,
'display_name': name,
'display_description': desc,
'user_id': self.user_uuid,
'project_id': 'fake-project-id'
}
def get(self, key, default=None):
return self.snap[key]
def __setitem__(self, key, value):
self.snap[key] = value
def __getitem__(self, key):
return self.snap[key]
class API(object):
volume_list = []
snapshot_list = []
_instance = None
class Singleton(object):
def __init__(self):
self.API = None
def __init__(self):
if API._instance is None:
API._instance = API.Singleton()
self._EventHandler_instance = API._instance
def create(self, context, size, name, description, snapshot=None,
volume_type=None, metadata=None, availability_zone=None):
v = fake_volume(size, name,
description, None,
snapshot, volume_type,
metadata, availability_zone)
self.volume_list.append(v.vol)
LOG.info('creating volume %s', v.vol['id'])
return v.vol
def create_with_kwargs(self, context, **kwargs):
volume_id = kwargs.get('volume_id', None)
v = fake_volume(kwargs['size'],
kwargs['name'],
kwargs['description'],
str(volume_id),
None,
None,
None,
None)
if kwargs.get('status', None) is not None:
v.vol['status'] = kwargs['status']
if kwargs['host'] is not None:
v.vol['host'] = kwargs['host']
if kwargs['attach_status'] is not None:
v.vol['attach_status'] = kwargs['attach_status']
if kwargs.get('snapshot_id', None) is not None:
v.vol['snapshot_id'] = kwargs['snapshot_id']
self.volume_list.append(v.vol)
return v.vol
def get(self, context, volume_id):
if str(volume_id) == '87654321':
return {'id': volume_id,
'attach_time': '13:56:24',
'attach_status': 'attached',
'status': 'in-use'}
for v in self.volume_list:
if v['id'] == str(volume_id):
return v
raise exception.VolumeNotFound(volume_id=volume_id)
def get_all(self, context):
return self.volume_list
def delete(self, context, volume_id):
LOG.info('deleting volume %s', volume_id)
self.volume_list = [v for v in self.volume_list
if v['id'] != volume_id]
def check_attach(self, context, volume, instance=None):
if volume['status'] != 'available':
msg = "Status of volume '%s' must be available" % volume
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == 'attached':
msg = "already attached"
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
if instance['availability_zone'] != volume['availability_zone']:
msg = "Instance and volume not in same availability_zone"
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume, instance=None):
if volume['status'] == "available":
msg = "already detached"
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == 'detached':
msg = "Volume must be attached in order to detach."
raise exception.InvalidVolume(reason=msg)
if instance and not volume.get('attachments', {}).get(instance.uuid):
raise exception.VolumeUnattached(volume_id=volume['id'])
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
LOG.info('attaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
volume['attach_status'] = 'attached'
volume['attach_time'] = timeutils.utcnow()
volume['multiattach'] = True
volume['attachments'] = {instance_uuid:
{'attachment_id': str(uuid.uuid4()),
'mountpoint': mountpoint}}
def reset_fake_api(self, context):
del self.volume_list[:]
del self.snapshot_list[:]
def detach(self, context, volume_id, instance_uuid, attachment_id=None):
LOG.info('detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
volume['attach_status'] = 'detached'
def initialize_connection(self, context, volume_id, connector):
return {'driver_volume_type': 'iscsi', 'data': {}}
def terminate_connection(self, context, volume_id, connector):
return None
def get_snapshot(self, context, snapshot_id):
for snap in self.snapshot_list:
if snap['id'] == str(snapshot_id):
return snap
def get_all_snapshots(self, context):
return self.snapshot_list
def create_snapshot(self, context, volume_id, name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_with_kwargs(self, context, **kwargs):
snapshot = fake_snapshot(kwargs.get('volume_id'),
kwargs.get('volume_size'),
kwargs.get('name'),
kwargs.get('description'),
kwargs.get('snap_id'))
status = kwargs.get('status', None)
snapshot.snap['status'] = status
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_force(self, context, volume_id,
name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def delete_snapshot(self, context, snapshot_id):
self.snapshot_list = [s for s in self.snapshot_list
if s['id'] != snapshot_id]
def reserve_volume(self, context, volume_id):
LOG.info('reserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'attaching'
def unreserve_volume(self, context, volume_id):
LOG.info('unreserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
def begin_detaching(self, context, volume_id):
LOG.info('begin detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'detaching'
def roll_detaching(self, context, volume_id):
LOG.info('roll detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
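# Hedged usage sketch (illustrative only): the fake API above mimics the volume
# attach/detach workflow exercised by Nova tests; `ctxt` and the instance UUID
# below are placeholders.
#
#   api = API()
#   vol = api.create(ctxt, 1, 'vol1', 'test volume')
#   api.check_attach(ctxt, vol)
#   api.attach(ctxt, vol['id'], 'some-instance-uuid', '/dev/vdb')
#   assert api.get(ctxt, vol['id'])['status'] == 'in-use'
#   api.detach(ctxt, vol['id'], 'some-instance-uuid')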
|
|
#
# unit tests for getdns. Most of these test that
# attributes are readable and writable, although
# there are some functionality tests, as well,
# and some functionality testing is a byproduct
# of data tests
#
# TODO: break these out into a test suite format,
# add more functionality tests
import unittest
import platform, sys, os, random, base64
un = platform.uname()
d = "../build/lib.{0}-{1}-{2}".format(
    un[0].lower(), un[4], '.'.join(platform.python_version().split('.')[:2])
)
# make sure the locally built module is found before importing getdns
sys.path.insert(0, d)
import getdns
class TestGetdnsMethods(unittest.TestCase):
def test_context(self):
c = getdns.Context()
self.assertIsNotNone(c)
del(c)
def test_bogus_attribute(self):
c = getdns.Context()
with self.assertRaises(AttributeError):
c.asdf
del(c)
def test_append_name(self):
c = getdns.Context()
c.append_name = getdns.APPEND_NAME_NEVER
self.assertEqual(c.append_name, getdns.APPEND_NAME_NEVER)
del(c)
def test_dns_root_servers(self):
c = getdns.Context()
addrs = [{'address_type': 'IPv4', 'address_data': '127.0.0.254'}]
c.dns_root_servers = addrs
self.assertEqual(c.dns_root_servers, addrs)
del(c)
def test_dns_transport_list(self):
c = getdns.Context()
transports = [getdns.TRANSPORT_TLS,
getdns.TRANSPORT_UDP,
getdns.TRANSPORT_TCP]
c.dns_transport_list = transports
self.assertEqual(c.dns_transport_list, transports)
del(c)
def test_dnssec_allowed_skew(self):
c = getdns.Context()
skew = 5
c.dnssec_allowed_skew = skew
self.assertEqual(c.dnssec_allowed_skew, skew)
del(c)
def test_edns_client_subnet_private(self):
c = getdns.Context()
p = 1
c.edns_client_subnet_private = p
self.assertEqual(c.edns_client_subnet_private, p)
del(c)
def test_edns_do_bit(self):
c = getdns.Context()
do = 1
c.edns_do_bit = do
self.assertEqual(c.edns_do_bit, do)
del(c)
def test_edns_extended_rcode(self):
c = getdns.Context()
r = 127
c.edns_extended_rcode = r
self.assertEqual(c.edns_extended_rcode, r)
del(c)
def test_edns_maximum_udp_payload_size(self):
c = getdns.Context()
s = 1024
c.edns_maximum_udp_payload_size = s
self.assertEqual(c.edns_maximum_udp_payload_size, s)
del(c)
def test_edns_version(self):
c = getdns.Context()
v = 2
c.edns_version = v
self.assertEqual(c.edns_version, v)
del(c)
# def test_follow_redirects(self):
# c = getdns.Context()
# c.follow_redirects = getdns.REDIRECTS_DO_NOT_FOLLOW
# self.assertEqual(c.follow_redirects, getdns.REDIRECTS_DO_NOT_FOLLOW)
# del(c)
def test_idle_timeout(self):
c = getdns.Context()
i = 5
c.idle_timeout = i
self.assertEqual(c.idle_timeout, i)
del(c)
def test_limit_outstanding_queries(self):
c = getdns.Context()
l = 4
c.limit_outstanding_queries = l
self.assertEqual(c.limit_outstanding_queries, l)
del(c)
del(l)
# def test_namespaces(self):
# c = getdns.Context()
# l = [ getdns.NAMESPACE_DNS, getdns.NAMESPACE_LOCALNAMES,
# getdns.NAMESPACE_NETBIOS, getdns.NAMESPACE_MDNS,
# getdns.NAMESPACE_NIS ]
# random.shuffle(l)
# c.namespaces = l
# self.assertEqual(c.namespaces, l)
# del(c)
# del(l)
def test_resolution_type(self):
c = getdns.Context()
r = getdns.RESOLUTION_STUB
c.resolution_type = r
self.assertEqual(c.resolution_type, r)
del(c)
del(r)
def test_timeout(self):
c = getdns.Context()
t = 1
c.timeout = t
self.assertEqual(c.timeout, t)
del(c)
del(t)
def test_tls_authentication(self):
c = getdns.Context()
t = getdns.AUTHENTICATION_NONE
c.tls_authentication = t
self.assertEqual(c.tls_authentication, t)
del(c)
del(t)
def test_tls_query_padding_blocksize(self):
c = getdns.Context()
b = 512
c.tls_query_padding_blocksize = b
self.assertEqual(c.tls_query_padding_blocksize, b)
del(c)
del(b)
def test_upstream_recursive_servers(self):
c = getdns.Context()
g = [
{'address_data': '8.8.8.8', 'address_type': 'IPv4'},
{'address_data': '8.8.4.4', 'address_type': 'IPv4'},
{'address_data': '2001:4860:4860::8888', 'address_type': 'IPv6'},
{'address_data': '2001:4860:4860::8844', 'address_type': 'IPv6'},
]
c.upstream_recursive_servers = g
self.assertEqual(c.upstream_recursive_servers, g)
del(c)
del(g)
def test_advanced_upstream_recursive(self):
c = getdns.Context()
c.resolution_type = getdns.RESOLUTION_STUB
u = [ { 'address_data': '185.49.141.37',
'address_type': 'IPv4',
'tsig_algorithm': 'hmac-md5.sig-alg.reg.int',
'tsig_name': 'hmac-md5.tsigs.getdnsapi.net',
'tsig_secret': base64.b64decode('16G69OTeXW6xSQ==')
} ]
c.upstream_recursive_servers = u
f = c.general('getdnsapi.net', request_type = getdns.RRTYPE_SOA)
self.assertEqual(f.replies_tree[0]['tsig_status'], getdns.DNSSEC_SECURE)
del(c)
del(u)
del(f)
def test_extensions(self):
c = getdns.Context()
e = { 'dnssec_return_status': getdns.EXTENSION_TRUE,
'dnssec_return_only_secure': getdns.EXTENSION_TRUE,
'dnssec_return_validation_chain': getdns.EXTENSION_TRUE,
'return_both_v4_and_v6': getdns.EXTENSION_TRUE,
'add_warning_for_bad_dns': getdns.EXTENSION_TRUE,
'return_call_reporting': getdns.EXTENSION_TRUE,
'specify_class': getdns.RRCLASS_IN
}
f = c.address('www.getdnsapi.net', extensions=e)
self.assertEqual(f.status, getdns.RESPSTATUS_GOOD)
del(c)
del(e)
def test_round_robin_upstreams(self):
c = getdns.Context()
i = 1
c.round_robin_upstreams = i
self.assertEqual(c.round_robin_upstreams, i)
del(c)
def test_tls_backoff_time(self):
c = getdns.Context()
i = 6000
c.tls_backoff_time = i
self.assertEqual(c.tls_backoff_time, i)
del(c)
def test_sync_address(self):
c = getdns.Context()
c.resolution_type = getdns.RESOLUTION_STUB
r = c.address('www.getdnsapi.net')
self.assertEqual(r.status, getdns.RESPSTATUS_GOOD)
self.assertTrue('185.49.141.37' in [x['address_data'] for x in
r.just_address_answers])
del(c)
del(r)
def test_sync_service(self):
c = getdns.Context()
c.resolution_type = getdns.RESOLUTION_STUB
r = c.service('_xmpp-server._tcp.jabber.org')
self.assertEqual(r.status, getdns.RESPSTATUS_GOOD)
del(c)
del(r)
def test_sync_hostname(self):
c = getdns.Context()
c.resolution_type = getdns.RESOLUTION_STUB
r = c.hostname({'address_type': 'IPv4',
'address_data': '185.49.141.37'})
self.assertEqual(r.status, getdns.RESPSTATUS_GOOD)
del(c)
del(r)
def test_sync_general(self):
c = getdns.Context()
c.resolution_type = getdns.RESOLUTION_STUB
r = c.general('nlnetlabs.nl', request_type=getdns.RRTYPE_NS)
self.assertEqual(r.status, getdns.RESPSTATUS_GOOD)
del(c)
del(r)
def test_file_to_list(self):
ns1 = {'class': 1,
'name': 'example.com.',
'rdata': {'nsdname': 'ns1.example.com.',
'rdata_raw': 'ns1.example.com.'},
'ttl': 3600,
'type': 2
}
ns2 = {'class': 1,
'name': 'example.com.',
'rdata': {'nsdname': 'ns2.example.com.',
'rdata_raw': 'ns2.example.com.'},
'ttl': 3600,
'type': 2
}
d = os.path.dirname(sys.argv[0])
f = open(('.' if d == '' else d) + '/example.com.zone')
r = getdns.file_to_list(f, 'example.com', 3600)
self.assertIsInstance(r, list)
self.assertEqual(r[1], ns1)
self.assertEqual(r[2], ns2)
f.close()
del(f)
del(r)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SSD Mobilenet V1 feature extractors.
By using the parameterized test decorator, this test covers both Slim-based and
Keras-based Mobilenet V1 feature extractors in SSD.
"""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_feature_extractor
from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor
slim = contrib_slim
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
if use_keras:
return (ssd_mobilenet_v1_keras_feature_extractor
.SSDMobileNetV1KerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
name='MobilenetV1'))
else:
return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers)
def test_extract_features_returns_correct_shapes_128(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_299(self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_with_dynamic_image_shape(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_keras):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
def test_preprocess_returns_correct_value_range(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=use_keras)
def test_variable_count(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=use_keras)
self.assertEqual(len(variables), 151)
def test_has_fused_batchnorm(self, use_keras):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
if use_keras:
_ = feature_extractor(preprocessed_image)
else:
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_extract_features_with_fewer_layers(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=use_keras)
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import fixtures
from lxml import etree
from nova.compute import arch
from nova.virt.libvirt import config as vconfig
# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True
# Has libvirt connection been used at least once
connection_used = False
def _reset():
global allow_default_uri_connection
allow_default_uri_connection = True
# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
# NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h
VIR_DOMAIN_XML_SECURE = 1
VIR_DOMAIN_XML_INACTIVE = 2
VIR_DOMAIN_XML_UPDATE_CPU = 4
VIR_DOMAIN_XML_MIGRATABLE = 8
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7
VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2
VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2
VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128
VIR_NODE_CPU_STATS_ALL_CPUS = -1
VIR_DOMAIN_START_PAUSED = 1
# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_FROM_NODEDEV = 666
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_FAILED = 510
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
VIR_ERR_CONFIG_UNSUPPORTED = 951
VIR_ERR_NO_NODE_DEVICE = 667
VIR_ERR_NO_SECRET = 66
# Readonly
VIR_CONNECT_RO = 1
# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1
# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
# secret type
VIR_SECRET_USAGE_TYPE_NONE = 0
VIR_SECRET_USAGE_TYPE_VOLUME = 1
VIR_SECRET_USAGE_TYPE_CEPH = 2
VIR_SECRET_USAGE_TYPE_ISCSI = 3
# Libvirt version
FAKE_LIBVIRT_VERSION = 9011
class HostInfo(object):
def __init__(self, arch=arch.X86_64, kB_mem=4096,
cpus=2, cpu_mhz=800, cpu_nodes=1,
cpu_sockets=1, cpu_cores=2,
cpu_threads=1, cpu_model="Penryn",
cpu_vendor="Intel", numa_topology='',
cpu_disabled=None):
"""Create a new Host Info object
:param arch: (string) indicating the CPU arch
(eg 'i686' or whatever else uname -m might return)
:param kB_mem: (int) memory size in KBytes
:param cpus: (int) the number of active CPUs
:param cpu_mhz: (int) expected CPU frequency
        :param cpu_nodes: (int) the number of NUMA cells; 1 for uniform
                          memory access or unusual NUMA topologies
:param cpu_sockets: (int) number of CPU sockets per node if nodes > 1,
total number of CPU sockets otherwise
:param cpu_cores: (int) number of cores per socket
:param cpu_threads: (int) number of threads per core
:param cpu_model: CPU model
:param cpu_vendor: CPU vendor
:param numa_topology: Numa topology
:param cpu_disabled: List of disabled cpus
"""
self.arch = arch
self.kB_mem = kB_mem
self.cpus = cpus
self.cpu_mhz = cpu_mhz
self.cpu_nodes = cpu_nodes
self.cpu_cores = cpu_cores
self.cpu_threads = cpu_threads
self.cpu_sockets = cpu_sockets
self.cpu_model = cpu_model
self.cpu_vendor = cpu_vendor
self.numa_topology = numa_topology
self.disabled_cpus_list = cpu_disabled or []
@classmethod
    def _gen_numa_topology(cls, cpu_nodes, cpu_sockets, cpu_cores,
cpu_threads, kb_mem, numa_mempages_list=None):
topology = vconfig.LibvirtConfigCapsNUMATopology()
cpu_count = 0
for cell_count in range(cpu_nodes):
cell = vconfig.LibvirtConfigCapsNUMACell()
cell.id = cell_count
cell.memory = kb_mem / cpu_nodes
for socket_count in range(cpu_sockets):
for cpu_num in range(cpu_cores * cpu_threads):
cpu = vconfig.LibvirtConfigCapsNUMACPU()
cpu.id = cpu_count
cpu.socket_id = cell_count
cpu.core_id = cpu_num // cpu_threads
cpu.siblings = set([cpu_threads *
(cpu_count // cpu_threads) + thread
for thread in range(cpu_threads)])
cell.cpus.append(cpu)
cpu_count += 1
            # Set mempages per NUMA cell. If numa_mempages_list is empty,
            # we will set only the default 4K pages.
if numa_mempages_list:
mempages = numa_mempages_list[cell_count]
else:
mempages = vconfig.LibvirtConfigCapsNUMAPages()
mempages.size = 4
mempages.total = cell.memory / mempages.size
mempages = [mempages]
cell.mempages = mempages
topology.cells.append(cell)
return topology
def get_numa_topology(self):
return self.numa_topology
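# Illustrative sketch (not part of the original fake driver): building a
# HostInfo whose getCapabilities() XML advertises a two-node NUMA topology.
# The sizes and counts below are made up for illustration only.
#
#     topology = HostInfo._gen_numa_topology(cpu_nodes=2, cpu_sockets=1,
#                                             cpu_cores=2, cpu_threads=1,
#                                             kb_mem=4 * 1024 * 1024)
#     host_info = HostInfo(cpu_nodes=2, kB_mem=4 * 1024 * 1024,
#                          numa_topology=topology)
#     conn = Connection('qemu:///session', host_info=host_info)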
VIR_DOMAIN_JOB_NONE = 0
VIR_DOMAIN_JOB_BOUNDED = 1
VIR_DOMAIN_JOB_UNBOUNDED = 2
VIR_DOMAIN_JOB_COMPLETED = 3
VIR_DOMAIN_JOB_FAILED = 4
VIR_DOMAIN_JOB_CANCELLED = 5
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
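# Illustrative example (not part of the original fake driver): for a disk
# element such as
#
#     <disk type='file' device='disk'>
#       <driver name='qemu' type='qcow2'/>
#       <source file='/var/lib/nova/instances/demo/disk'/>
#       <target dev='vda' bus='virtio'/>
#     </disk>
#
# _parse_disk_info returns (the path is a made-up example)
#
#     {'type': 'file', 'device': 'disk',
#      'driver_name': 'qemu', 'driver_type': 'qcow2',
#      'source': '/var/lib/nova/instances/demo/disk',
#      'target_dev': 'vda', 'target_bus': 'virtio'}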
def disable_event_thread(self):
"""Disable nova libvirt driver event thread.
The Nova libvirt driver includes a native thread which monitors
the libvirt event channel. In a testing environment this becomes
problematic because it means we've got a floating thread calling
sleep(1) over the life of the unit test. Seems harmless? It's not,
because we sometimes want to test things like retry loops that
    should have specific sleep patterns. An unlucky firing of the
libvirt thread will cause a test failure.
"""
    # Because we are patching a method in a class, MonkeyPatch doesn't
    # auto-import correctly. Import explicitly, otherwise the patching
    # may silently fail.
import nova.virt.libvirt.host # noqa
def evloop(*args, **kwargs):
pass
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.Host._init_events',
evloop))
class libvirtError(Exception):
"""This class was copied and slightly modified from
`libvirt-python:libvirt-override.py`.
Since a test environment will use the real `libvirt-python` version of
`libvirtError` if it's installed and not this fake, we need to maintain
strict compatibility with the original class, including `__init__` args
and instance-attributes.
To create a libvirtError instance you should:
# Create an unsupported error exception
exc = libvirtError('my message')
exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)
self.err is a tuple of form:
(error_code, error_domain, error_message, error_level, str1, str2,
str3, int1, int2)
Alternatively, you can use the `make_libvirtError` convenience function to
allow you to specify these attributes in one shot.
"""
def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
vol=None):
Exception.__init__(self, defmsg)
self.err = None
def get_error_code(self):
if self.err is None:
return None
return self.err[0]
def get_error_domain(self):
if self.err is None:
return None
return self.err[1]
def get_error_message(self):
if self.err is None:
return None
return self.err[2]
def get_error_level(self):
if self.err is None:
return None
return self.err[3]
def get_str1(self):
if self.err is None:
return None
return self.err[4]
def get_str2(self):
if self.err is None:
return None
return self.err[5]
def get_str3(self):
if self.err is None:
return None
return self.err[6]
def get_int1(self):
if self.err is None:
return None
return self.err[7]
def get_int2(self):
if self.err is None:
return None
return self.err[8]
class NWFilter(object):
def __init__(self, connection, xml):
self._connection = connection
self._xml = xml
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def undefine(self):
self._connection._remove_filter(self)
class NodeDevice(object):
def __init__(self, connection, xml=None):
self._connection = connection
self._xml = xml
if xml is not None:
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def attach(self):
pass
    def dettach(self):
        # NB: the spelling matches the real libvirt python binding
        # (virNodeDevice.dettach()), so it is intentionally kept.
        pass
def reset(self):
pass
class Domain(object):
def __init__(self, connection, xml, running=False, transient=False):
self._connection = connection
if running:
connection._mark_running(self)
self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF
self._transient = transient
self._def = self._parse_definition(xml)
self._has_saved_state = False
self._snapshots = {}
self._id = self._connection._id_counter
def _parse_definition(self, xml):
try:
tree = etree.fromstring(xml)
except etree.ParseError:
raise make_libvirtError(
libvirtError, "Invalid XML.",
error_code=VIR_ERR_XML_DETAIL,
error_domain=VIR_FROM_DOMAIN)
definition = {}
name = tree.find('./name')
if name is not None:
definition['name'] = name.text
uuid_elem = tree.find('./uuid')
if uuid_elem is not None:
definition['uuid'] = uuid_elem.text
else:
definition['uuid'] = str(uuid.uuid4())
vcpu = tree.find('./vcpu')
if vcpu is not None:
definition['vcpu'] = int(vcpu.text)
memory = tree.find('./memory')
if memory is not None:
definition['memory'] = int(memory.text)
os = {}
os_type = tree.find('./os/type')
if os_type is not None:
os['type'] = os_type.text
os['arch'] = os_type.get('arch', self._connection.host_info.arch)
os_kernel = tree.find('./os/kernel')
if os_kernel is not None:
os['kernel'] = os_kernel.text
os_initrd = tree.find('./os/initrd')
if os_initrd is not None:
os['initrd'] = os_initrd.text
os_cmdline = tree.find('./os/cmdline')
if os_cmdline is not None:
os['cmdline'] = os_cmdline.text
os_boot = tree.find('./os/boot')
if os_boot is not None:
os['boot_dev'] = os_boot.get('dev')
definition['os'] = os
features = {}
acpi = tree.find('./features/acpi')
if acpi is not None:
features['acpi'] = True
definition['features'] = features
devices = {}
device_nodes = tree.find('./devices')
if device_nodes is not None:
disks_info = []
disks = device_nodes.findall('./disk')
for disk in disks:
disks_info += [_parse_disk_info(disk)]
devices['disks'] = disks_info
nics_info = []
nics = device_nodes.findall('./interface')
for nic in nics:
nic_info = {}
nic_info['type'] = nic.get('type')
mac = nic.find('./mac')
if mac is not None:
nic_info['mac'] = mac.get('address')
source = nic.find('./source')
if source is not None:
if nic_info['type'] == 'network':
nic_info['source'] = source.get('network')
elif nic_info['type'] == 'bridge':
nic_info['source'] = source.get('bridge')
nics_info += [nic_info]
devices['nics'] = nics_info
definition['devices'] = devices
return definition
def create(self):
self.createWithFlags(0)
def createWithFlags(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
self._has_saved_state = False
def isActive(self):
return int(self._state == VIR_DOMAIN_RUNNING)
def undefine(self):
self._connection._undefine(self)
def undefineFlags(self, flags):
self.undefine()
if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
if self.hasManagedSaveImage(0):
self.managedSaveRemove()
def destroy(self):
self._state = VIR_DOMAIN_SHUTOFF
self._connection._mark_not_running(self)
def ID(self):
return self._id
def name(self):
return self._def['name']
def UUIDString(self):
return self._def['uuid']
def interfaceStats(self, device):
return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]
def blockStats(self, device):
return [2, 10000242400, 234, 2343424234, 34]
def suspend(self):
self._state = VIR_DOMAIN_PAUSED
def shutdown(self):
self._state = VIR_DOMAIN_SHUTDOWN
self._connection._mark_not_running(self)
def reset(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
def info(self):
return [self._state,
long(self._def['memory']),
long(self._def['memory']),
self._def['vcpu'],
123456789L]
def migrateToURI(self, desturi, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def attachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
self._def['devices']['disks'] += [disk_info]
return True
def attachDeviceFlags(self, xml, flags):
if (flags & VIR_DOMAIN_AFFECT_LIVE and
self._state != VIR_DOMAIN_RUNNING):
raise make_libvirtError(
libvirtError,
"AFFECT_LIVE only allowed for running domains!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
self.attachDevice(xml)
def detachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
return disk_info in self._def['devices']['disks']
def detachDeviceFlags(self, xml, _flags):
self.detachDevice(xml)
def XMLDesc(self, flags):
disks = ''
for disk in self._def['devices']['disks']:
disks += '''<disk type='%(type)s' device='%(device)s'>
<driver name='%(driver_name)s' type='%(driver_type)s'/>
<source file='%(source)s'/>
<target dev='%(target_dev)s' bus='%(target_bus)s'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>''' % disk
nics = ''
for nic in self._def['devices']['nics']:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
return '''<domain type='kvm'>
<name>%(name)s</name>
<uuid>%(uuid)s</uuid>
<memory>%(memory)s</memory>
<currentMemory>%(memory)s</currentMemory>
<vcpu>%(vcpu)s</vcpu>
<os>
<type arch='%(arch)s' machine='pc-0.12'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='localtime'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/kvm</emulator>
%(disks)s
<controller type='ide' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01'
function='0x1'/>
</controller>
%(nics)s
<serial type='file'>
<source path='dummy.log'/>
<target port='0'/>
</serial>
<serial type='pty'>
<source pty='/dev/pts/27'/>
<target port='1'/>
</serial>
<serial type='tcp'>
<source host="-1" service="-1" mode="bind"/>
</serial>
<console type='file'>
<source path='dummy.log'/>
<target port='0'/>
</console>
<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'/>
<graphics type='spice' port='-1' autoport='yes'/>
<video>
<model type='cirrus' vram='9216' heads='1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02'
function='0x0'/>
</video>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
</memballoon>
</devices>
</domain>''' % {'name': self._def['name'],
'uuid': self._def['uuid'],
'memory': self._def['memory'],
'vcpu': self._def['vcpu'],
'arch': self._def['os']['arch'],
'disks': disks,
'nics': nics}
def managedSave(self, flags):
self._connection._mark_not_running(self)
self._has_saved_state = True
def managedSaveRemove(self, flags):
self._has_saved_state = False
def hasManagedSaveImage(self, flags):
return int(self._has_saved_state)
def resume(self):
self._state = VIR_DOMAIN_RUNNING
def snapshotCreateXML(self, xml, flags):
tree = etree.fromstring(xml)
name = tree.find('./name').text
snapshot = DomainSnapshot(name, self)
self._snapshots[name] = snapshot
return snapshot
def vcpus(self):
vcpus = ([], [])
for i in range(0, self._def['vcpu']):
vcpus[0].append((i, 1, 120405L, i))
vcpus[1].append((True, True, True, True))
return vcpus
def memoryStats(self):
return {}
def maxMemory(self):
return self._def['memory']
def blockJobInfo(self, disk, flags):
return {}
def jobInfo(self):
return []
def jobStats(self, flags=0):
return {}
class DomainSnapshot(object):
def __init__(self, name, domain):
self._name = name
self._domain = domain
def delete(self, flags):
del self._domain._snapshots[self._name]
class Connection(object):
def __init__(self, uri=None, readonly=False, version=9011,
hv_version=1001000, host_info=None):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
else:
raise ValueError("URI was None, but fake libvirt is "
"configured to not accept this.")
uri_whitelist = ['qemu:///system',
'qemu:///session',
'lxc:///', # from LibvirtDriver.uri()
'xen:///', # from LibvirtDriver.uri()
'uml:///system',
'test:///default',
'parallels:///system']
if uri not in uri_whitelist:
raise make_libvirtError(
libvirtError,
"libvirt error: no connection driver "
"available for No connection for URI %s" % uri,
error_code=5, error_domain=0)
self.readonly = readonly
self._uri = uri
self._vms = {}
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
self._nodedevs = {}
self._event_callbacks = {}
self.fakeLibVersion = version
self.fakeVersion = hv_version
self.host_info = host_info or HostInfo()
def _add_filter(self, nwfilter):
self._nwfilters[nwfilter._name] = nwfilter
def _remove_filter(self, nwfilter):
del self._nwfilters[nwfilter._name]
def _add_nodedev(self, nodedev):
self._nodedevs[nodedev._name] = nodedev
def _remove_nodedev(self, nodedev):
del self._nodedevs[nodedev._name]
def _mark_running(self, dom):
self._running_vms[self._id_counter] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
self._id_counter += 1
def _mark_not_running(self, dom):
if dom._transient:
self._undefine(dom)
dom._id = -1
for (k, v) in self._running_vms.iteritems():
if v == dom:
del self._running_vms[k]
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
return
def _undefine(self, dom):
del self._vms[dom.name()]
if not dom._transient:
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
def getInfo(self):
return [self.host_info.arch,
self.host_info.kB_mem,
self.host_info.cpus,
self.host_info.cpu_mhz,
self.host_info.cpu_nodes,
self.host_info.cpu_sockets,
self.host_info.cpu_cores,
self.host_info.cpu_threads]
def numOfDomains(self):
return len(self._running_vms)
def listDomainsID(self):
return self._running_vms.keys()
def lookupByID(self, id):
if id in self._running_vms:
return self._running_vms[id]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching id %d' % id,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def lookupByName(self, name):
if name in self._vms:
return self._vms[name]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching name "%s"' % name,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
    def listAllDomains(self, flags):
        vms = []
        for vm in self._vms.values():
            if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
                if vm._state != VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
            if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
                if vm._state == VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
        return vms
def _emit_lifecycle(self, dom, event, detail):
if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
return
cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
callback = cbinfo[0]
opaque = cbinfo[1]
callback(self, dom, event, detail, opaque)
def defineXML(self, xml):
dom = Domain(connection=self, running=False, transient=False, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
return dom
def createXML(self, xml, flags):
dom = Domain(connection=self, running=True, transient=True, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
return dom
def getType(self):
if self._uri == 'qemu:///system':
return 'QEMU'
def getLibVersion(self):
return self.fakeLibVersion
def getVersion(self):
return self.fakeVersion
def getHostname(self):
return 'compute1'
def domainEventRegisterAny(self, dom, eventid, callback, opaque):
self._event_callbacks[eventid] = [callback, opaque]
def registerCloseCallback(self, cb, opaque):
pass
def getCPUMap(self):
"""Return calculated CPU map from HostInfo, by default showing 2
online CPUs.
"""
active_cpus = self.host_info.cpus
total_cpus = active_cpus + len(self.host_info.disabled_cpus_list)
cpu_map = [True if cpu_num not in self.host_info.disabled_cpus_list
else False for cpu_num in range(total_cpus)]
return (total_cpus, cpu_map, active_cpus)
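    # Worked example (illustrative): the default HostInfo has cpus=2 and no
    # disabled CPUs, so getCPUMap() returns (2, [True, True], 2); with
    # HostInfo(cpus=2, cpu_disabled=[1]) it would return
    # (3, [True, False, True], 2).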
def getCapabilities(self):
"""Return spoofed capabilities."""
numa_topology = self.host_info.get_numa_topology()
if isinstance(numa_topology, vconfig.LibvirtConfigCapsNUMATopology):
numa_topology = numa_topology.to_xml()
return '''<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
</cpu>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
</uri_transports>
</migration_features>
%(topology)s
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<pae/>
<nonpae/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='armv7l'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-arm</emulator>
<machine>integratorcp</machine>
<machine>vexpress-a9</machine>
<machine>syborg</machine>
<machine>musicpal</machine>
<machine>mainstone</machine>
<machine>n800</machine>
<machine>n810</machine>
<machine>n900</machine>
<machine>cheetah</machine>
<machine>sx1</machine>
<machine>sx1-v1</machine>
<machine>beagle</machine>
<machine>beaglexm</machine>
<machine>tosa</machine>
<machine>akita</machine>
<machine>spitz</machine>
<machine>borzoi</machine>
<machine>terrier</machine>
<machine>connex</machine>
<machine>verdex</machine>
<machine>lm3s811evb</machine>
<machine>lm3s6965evb</machine>
<machine>realview-eb</machine>
<machine>realview-eb-mpcore</machine>
<machine>realview-pb-a8</machine>
<machine>realview-pbx-a9</machine>
<machine>versatilepb</machine>
<machine>versatileab</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mips'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mips</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mipsel'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mipsel</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='sparc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-sparc</emulator>
<machine>SS-5</machine>
<machine>leon3_generic</machine>
<machine>SS-10</machine>
<machine>SS-600MP</machine>
<machine>SS-20</machine>
<machine>Voyager</machine>
<machine>LX</machine>
<machine>SS-4</machine>
<machine>SPARCClassic</machine>
<machine>SPARCbook</machine>
<machine>SS-1000</machine>
<machine>SS-2000</machine>
<machine>SS-2</machine>
<domain type='qemu'>
</domain>
</arch>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='ppc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-ppc</emulator>
<machine>g3beige</machine>
<machine>virtex-ml507</machine>
<machine>mpc8544ds</machine>
<machine canonical='bamboo-0.13'>bamboo</machine>
<machine>bamboo-0.13</machine>
<machine>bamboo-0.12</machine>
<machine>ref405ep</machine>
<machine>taihu</machine>
<machine>mac99</machine>
<machine>prep</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
</capabilities>''' % {'sockets': self.host_info.cpu_sockets,
'cores': self.host_info.cpu_cores,
'threads': self.host_info.cpu_threads,
'topology': numa_topology}
def compareCPU(self, xml, flags):
tree = etree.fromstring(xml)
arch_node = tree.find('./arch')
if arch_node is not None:
if arch_node.text not in [arch.X86_64,
arch.I686]:
return VIR_CPU_COMPARE_INCOMPATIBLE
model_node = tree.find('./model')
if model_node is not None:
if model_node.text != self.host_info.cpu_model:
return VIR_CPU_COMPARE_INCOMPATIBLE
vendor_node = tree.find('./vendor')
if vendor_node is not None:
if vendor_node.text != self.host_info.cpu_vendor:
return VIR_CPU_COMPARE_INCOMPATIBLE
# The rest of the stuff libvirt implements is rather complicated
# and I don't think it adds much value to replicate it here.
return VIR_CPU_COMPARE_IDENTICAL
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000L,
'idle': 1592705190000000L,
'user': 26728850000000L,
'iowait': 6121490000000L}
else:
raise make_libvirtError(
libvirtError,
"invalid argument: Invalid cpu number",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def nwfilterLookupByName(self, name):
try:
return self._nwfilters[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nwfilter with matching name %s" % name,
error_code=VIR_ERR_NO_NWFILTER,
error_domain=VIR_FROM_NWFILTER)
def nwfilterDefineXML(self, xml):
nwfilter = NWFilter(self, xml)
self._add_filter(nwfilter)
def nodeDeviceLookupByName(self, name):
try:
return self._nodedevs[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nodedev with matching name %s" % name,
error_code=VIR_ERR_NO_NODE_DEVICE,
error_domain=VIR_FROM_NODEDEV)
def listDefinedDomains(self):
return []
def listDevices(self, cap, flags):
return []
def baselineCPU(self, cpu, flag):
"""Add new libvirt API."""
return """<cpu mode='custom' match='exact'>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
<feature policy='require' name='aes'/>
</cpu>"""
def secretLookupByUsage(self, usage_type_obj, usage_id):
pass
def secretDefineXML(self, xml):
pass
def openAuth(uri, auth, flags=0):
if type(auth) != list:
raise Exception("Expected a list for 'auth' parameter")
if type(auth[0]) != list:
raise Exception("Expected a function in 'auth[0]' parameter")
if not callable(auth[1]):
raise Exception("Expected a function in 'auth[1]' parameter")
return Connection(uri, (flags == VIR_CONNECT_RO))
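# Illustrative example (not part of the original fake driver): the fake only
# checks the shape of the 'auth' argument, i.e. a list whose first element is
# a list of credential type constants and whose second element is a callback:
#
#     conn = openAuth('qemu:///system',
#                     [[VIR_CRED_AUTHNAME, VIR_CRED_NOECHOPROMPT],
#                      lambda creds, opaque: 0,
#                      None])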
def virEventRunDefaultImpl():
time.sleep(1)
def virEventRegisterDefaultImpl():
if connection_used:
raise Exception("virEventRegisterDefaultImpl() must be "
"called before connection is used.")
def registerErrorHandler(handler, ctxt):
pass
def make_libvirtError(error_class, msg, error_code=None,
error_domain=None, error_message=None,
error_level=None, str1=None, str2=None, str3=None,
int1=None, int2=None):
"""Convenience function for creating `libvirtError` exceptions which
allow you to specify arguments in constructor without having to manipulate
the `err` tuple directly.
We need to pass in `error_class` to this function because it may be
`libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
`libvirt-python` is installed.
"""
exc = error_class(msg)
exc.err = (error_code, error_domain, error_message, error_level,
str1, str2, str3, int1, int2)
return exc
virDomain = Domain
virNodeDevice = NodeDevice
virConnect = Connection
class FakeLibvirtFixture(fixtures.Fixture):
"""Performs global setup/stubbing for all libvirt tests.
"""
def setUp(self):
super(FakeLibvirtFixture, self).setUp()
disable_event_thread(self)
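# Illustrative smoke test (not part of the original fake driver): exercising
# the fake Connection end to end. The domain XML below is a made-up minimal
# example; any well-formed <domain> document with a <name> element works.
if __name__ == '__main__':
    _demo_xml = """<domain type="kvm">
                     <name>demo-instance</name>
                     <memory>262144</memory>
                     <vcpu>1</vcpu>
                   </domain>"""
    _conn = Connection('qemu:///session')
    _dom = _conn.defineXML(_demo_xml)
    _dom.create()
    assert _conn.lookupByName('demo-instance') is _dom
    assert _dom.ID() in _conn.listDomainsID()
    _dom.destroy()
    assert not _dom.isActive()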
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from horizon.workflows import views
INDEX_URL = reverse('horizon:project:access_and_security:index')
NAMESPACE = "horizon:project:access_and_security:floating_ips"
class FloatingIpViewTests(test.TestCase):
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
url = reverse('%s:associate' % NAMESPACE)
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'floating_ip_target_get_by_instance',
'tenant_floating_ip_list',)})
def test_associate_with_instance_id(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_target_get_by_instance(
IsA(http.HttpRequest), 'TEST-ID', self.servers.list()) \
.AndReturn('TEST-ID')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'instance_id': 'TEST-ID'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_with_port_id(self):
targets = [api.nova.FloatingIpTarget(s) for s in self.servers.list()]
targets[0].port_id = '101'
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(targets)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'port_id': '101'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_redirect(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
next = reverse("horizon:project:instances:index")
form_data = {'instance_id': server.id,
'next': next,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, next)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_exception(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'floating_ip_supported',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'floating_ip_supported',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post_with_exception(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_attributes(self):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 10
sec_groups = self.security_groups.list()
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(
IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(sec_groups)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__floating_ips_tab")
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertEqual(set(['ajax-modal']), set(allocate_action.classes))
self.assertEqual('Allocate IP To Project',
six.text_type(allocate_action.verbose_name))
self.assertIsNone(allocate_action.policy_rules)
url = 'horizon:project:access_and_security:floating_ips:allocate'
self.assertEqual(url, allocate_action.url)
@test.create_stubs({api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list',
'floating_ip_pools_list',),
api.nova: ('keypair_list',
'server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_disabled_when_quota_exceeded(self):
keypairs = self.keypairs.list()
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 0
sec_groups = self.security_groups.list()
api.network.floating_ip_supported(
IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(
IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(sec_groups)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.keypair_list(
IsA(http.HttpRequest)) \
.AndReturn(keypairs)
api.nova.server_list(
IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'ec2').MultipleTimes() \
.AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL +
"?tab=access_security_tabs__floating_ips_tab")
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertTrue('disabled' in allocate_action.classes,
                        'The allocate button should be disabled')
self.assertEqual('Allocate IP To Project (Quota exceeded)',
six.text_type(allocate_action.verbose_name))
class FloatingIpNeutronViewTests(FloatingIpViewTests):
def setUp(self):
super(FloatingIpViewTests, self).setUp()
self._floating_ips_orig = self.floating_ips
self.floating_ips = self.floating_ips_uuid
def tearDown(self):
self.floating_ips = self._floating_ips_orig
super(FloatingIpViewTests, self).tearDown()
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
all_tenants=True) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest)) \
.AndReturn(self.subnets.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(self.networks.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed_shared_networks(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
all_tenants=True) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest)) \
.AndReturn(self.subnets.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(self.networks.list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
|
|
"""
Functions for generating and verifying JSON Web Tokens.
"""
from datetime import datetime, timedelta
from calendar import timegm
from os import urandom
from jwcrypto.jws import JWS, JWSHeaderRegistry
from jwcrypto.common import base64url_encode, base64url_decode, \
json_encode, json_decode
class _JWTError(Exception):
""" Exception raised if claim doesn't pass. Private to this module because
jwcrypto throws many exceptions too. """
pass
def generate_jwt(claims, priv_key=None,
algorithm='PS512', lifetime=None, expires=None,
not_before=None,
jti_size=16, other_headers=None):
"""
Generate a JSON Web Token.
:param claims: The claims you want included in the signature.
:type claims: dict
:param priv_key: The private key to be used to sign the token. Note: if you pass ``None`` then the token will be returned with an empty cryptographic signature and :obj:`algorithm` will be forced to the value ``none``.
:type priv_key: `jwcrypto.jwk.JWK <https://jwcrypto.readthedocs.io/en/latest/jwk.html>`_
:param algorithm: The algorithm to use for generating the signature. ``RS256``, ``RS384``, ``RS512``, ``PS256``, ``PS384``, ``PS512``, ``ES256``, ``ES384``, ``ES512``, ``HS256``, ``HS384``, ``HS512`` and ``none`` are supported.
:type algorithm: str
:param lifetime: How long the token is valid for.
:type lifetime: datetime.timedelta
:param expires: When the token expires (if :obj:`lifetime` isn't specified)
:type expires: datetime.datetime
:param not_before: When the token is valid from. Defaults to current time (if ``None`` is passed).
:type not_before: datetime.datetime
:param jti_size: Size in bytes of the unique token ID to put into the token (can be used to detect replay attacks). Defaults to 16 (128 bits). Specify 0 or ``None`` to omit the JTI from the token.
:type jti_size: int
:param other_headers: Any headers other than "typ" and "alg" may be specified, they will be included in the header.
:type other_headers: dict
:rtype: unicode
:returns: The JSON Web Token. Note this includes a header, the claims and a cryptographic signature. The following extra claims are added, per the `JWT spec <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html>`_:
- **exp** (*IntDate*) -- The UTC expiry date and time of the token, in number of seconds from 1970-01-01T0:0:0Z UTC.
- **iat** (*IntDate*) -- The UTC date and time at which the token was generated.
- **nbf** (*IntDate*) -- The UTC valid-from date and time of the token.
- **jti** (*str*) -- A unique identifier for the token.
:raises:
ValueError: If other_headers contains either the "typ" or "alg" header
"""
header = {
'typ': 'JWT',
'alg': algorithm if priv_key else 'none'
}
if other_headers is not None:
redefined_keys = set(header.keys()) & set(other_headers.keys())
if redefined_keys:
raise ValueError('other_headers re-specified the headers: {}'.format(', '.join(redefined_keys)))
header.update(other_headers)
claims = dict(claims)
now = datetime.utcnow()
if jti_size:
claims['jti'] = base64url_encode(urandom(jti_size))
claims['nbf'] = timegm((not_before or now).utctimetuple())
claims['iat'] = timegm(now.utctimetuple())
if lifetime:
claims['exp'] = timegm((now + lifetime).utctimetuple())
elif expires:
claims['exp'] = timegm(expires.utctimetuple())
if header['alg'] == 'none':
signature = ''
else:
token = JWS(json_encode(claims))
token.allowed_algs = [header['alg']]
token.add_signature(priv_key, protected=header)
signature = json_decode(token.serialize())['signature']
return u'%s.%s.%s' % (
base64url_encode(json_encode(header)),
base64url_encode(json_encode(claims)),
signature
)
#pylint: disable=R0912,too-many-locals
def verify_jwt(jwt,
pub_key=None,
allowed_algs=None,
iat_skew=timedelta(),
checks_optional=False,
ignore_not_implemented=False):
"""
Verify a JSON Web Token.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:param pub_key: The public key to be used to verify the token. Note: if you pass ``None`` and **allowed_algs** contains ``none`` then the token's signature will not be verified.
:type pub_key: `jwcrypto.jwk.JWK <https://jwcrypto.readthedocs.io/en/latest/jwk.html>`_
:param allowed_algs: Algorithms expected to be used to sign the token. The ``in`` operator is used to test membership.
:type allowed_algs: list or NoneType (meaning an empty list)
    :param iat_skew: The amount of leeway to allow between the issuer's clock and the verifier's clock when verifying that the token was generated in the past. Defaults to no leeway.
:type iat_skew: datetime.timedelta
:param checks_optional: If ``False``, then the token must contain the **typ** header property and the **iat**, **nbf** and **exp** claim properties.
:type checks_optional: bool
:param ignore_not_implemented: If ``False``, then the token must *not* contain the **jku**, **jwk**, **x5u**, **x5c** or **x5t** header properties.
:type ignore_not_implemented: bool
:rtype: tuple
:returns: ``(header, claims)`` if the token was verified successfully. The token must pass the following tests:
- Its header must contain a property **alg** with a value in **allowed_algs**.
- Its signature must verify using **pub_key** (unless its algorithm is ``none`` and ``none`` is in **allowed_algs**).
- If the corresponding property is present or **checks_optional** is ``False``:
- Its header must contain a property **typ** with the value ``JWT``.
- Its claims must contain a property **iat** which represents a date in the past (taking into account :obj:`iat_skew`).
- Its claims must contain a property **nbf** which represents a date in the past.
- Its claims must contain a property **exp** which represents a date in the future.
:raises: If the token failed to verify.
"""
if allowed_algs is None:
allowed_algs = []
if not isinstance(allowed_algs, list):
# jwcrypto only supports list of allowed algorithms
raise _JWTError('allowed_algs must be a list')
header, claims, _ = jwt.split('.')
parsed_header = json_decode(base64url_decode(header))
alg = parsed_header.get('alg')
if alg is None:
raise _JWTError('alg header not present')
if alg not in allowed_algs:
raise _JWTError('algorithm not allowed: ' + alg)
if not ignore_not_implemented:
for k in parsed_header:
if k not in JWSHeaderRegistry:
raise _JWTError('unknown header: ' + k)
if not JWSHeaderRegistry[k].supported:
raise _JWTError('header not implemented: ' + k)
if pub_key:
token = JWS()
token.allowed_algs = allowed_algs
token.deserialize(jwt, pub_key)
elif 'none' not in allowed_algs:
raise _JWTError('no key but none alg not allowed')
parsed_claims = json_decode(base64url_decode(claims))
utcnow = datetime.utcnow()
now = timegm(utcnow.utctimetuple())
typ = parsed_header.get('typ')
if typ is None:
if not checks_optional:
raise _JWTError('typ header not present')
elif typ != 'JWT':
raise _JWTError('typ header is not JWT')
iat = parsed_claims.get('iat')
if iat is None:
if not checks_optional:
raise _JWTError('iat claim not present')
elif iat > timegm((utcnow + iat_skew).utctimetuple()):
raise _JWTError('issued in the future')
nbf = parsed_claims.get('nbf')
if nbf is None:
if not checks_optional:
raise _JWTError('nbf claim not present')
elif nbf > now:
raise _JWTError('not yet valid')
exp = parsed_claims.get('exp')
if exp is None:
if not checks_optional:
raise _JWTError('exp claim not present')
elif exp <= now:
raise _JWTError('expired')
return parsed_header, parsed_claims
#pylint: enable=R0912
def process_jwt(jwt):
"""
Process a JSON Web Token without verifying it.
Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:rtype: tuple
:returns: ``(header, claims)``
"""
header, claims, _ = jwt.split('.')
parsed_header = json_decode(base64url_decode(header))
parsed_claims = json_decode(base64url_decode(claims))
return parsed_header, parsed_claims
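# Illustrative sketch (not part of the original module): a round trip using
# the unsigned "none" algorithm so no key material is required. In real use
# you would pass a jwcrypto.jwk.JWK private/public key pair and a signing
# algorithm such as 'PS512' instead of None.
if __name__ == '__main__':
    _token = generate_jwt({'sub': 'demo'}, None, lifetime=timedelta(minutes=5))
    _header, _claims = process_jwt(_token)
    assert _header['alg'] == 'none'
    _header, _claims = verify_jwt(_token, None, allowed_algs=['none'])
    assert _claims['sub'] == 'demo'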
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import multiprocessing
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from .. import argparse
from .. import defaults
__all__ = [
'HelpOption',
'SetOption',
'SetTrueOption',
'SetFalseOption',
'DisableOption',
'EnableOption',
'ChoicesOption',
'IntOption',
'StrOption',
'PathOption',
'AppendOption',
'UnsupportedOption',
'IgnoreOption',
'EXPECTED_OPTIONS',
'EXPECTED_DEFAULTS',
]
# -----------------------------------------------------------------------------
EXPECTED_DEFAULTS = {
'android': False,
'android_api_level': '21',
'android_deploy_device_path': '/data/local/tmp',
'android_icu_i18n': None,
'android_icu_i18n_include': None,
'android_icu_uc': None,
'android_icu_uc_include': None,
'android_icu_data': None,
'android_ndk': None,
'android_ndk_gcc_version': '4.9',
'android_arch': 'armv7',
'assertions': True,
'benchmark': False,
'benchmark_num_o_iterations': 3,
'benchmark_num_onone_iterations': 3,
'build_android': False,
'build_args': [],
'build_benchmarks': True,
'build_cygwin': True,
'build_external_benchmarks': False,
'build_foundation': False,
'build_freebsd': True,
'build_ios': True,
'build_ios_device': False,
'build_ios_simulator': False,
'build_jobs': multiprocessing.cpu_count(),
'build_libdispatch': False,
'build_libicu': False,
'build_linux': True,
'build_llbuild': False,
'build_lldb': False,
'build_libcxx': False,
'build_ninja': False,
'build_osx': True,
'build_playgroundsupport': False,
'build_runtime_with_host_compiler': False,
'build_stdlib_deployment_targets': ['all'],
'build_subdir': None,
'build_swift_dynamic_sdk_overlay': True,
'build_swift_dynamic_stdlib': True,
'build_swift_static_sdk_overlay': False,
'build_swift_static_stdlib': False,
'build_swift_stdlib_unittest_extra': False,
'build_swiftpm': False,
'build_swiftsyntax': False,
'build_libparser_only': False,
'build_skstresstester': False,
'build_swiftevolve': False,
'build_indexstoredb': False,
'build_sourcekitlsp': False,
'build_tvos': True,
'build_tvos_device': False,
'build_tvos_simulator': False,
'build_variant': 'Debug',
'build_watchos': True,
'build_watchos_device': False,
'build_watchos_simulator': False,
'build_xctest': False,
'clang_compiler_version': None,
'clang_profile_instr_use': None,
'clang_user_visible_version': defaults.CLANG_USER_VISIBLE_VERSION,
'clean': False,
'cmake': None,
'cmake_generator': 'Ninja',
'cmark_assertions': True,
'cmark_build_variant': 'Debug',
'compiler_vendor': defaults.COMPILER_VENDOR,
'coverage_db': None,
'cross_compile_hosts': [],
'darwin_deployment_version_ios':
defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
'darwin_deployment_version_osx':
defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
'darwin_deployment_version_tvos':
defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
'darwin_deployment_version_watchos':
defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
'darwin_xcrun_toolchain': None,
'distcc': False,
'dry_run': False,
'enable_asan': False,
'enable_lsan': False,
'enable_sanitize_coverage': False,
'enable_sil_ownership': False,
'disable_guaranteed_normal_arguments': False,
'enable_stdlibcore_exclusivity_checking': False,
'enable_tsan': False,
'enable_tsan_runtime': False,
'enable_ubsan': False,
'export_compile_commands': False,
'extra_cmake_options': [],
'extra_swift_args': [],
'force_optimized_typechecker': False,
'foundation_build_variant': 'Debug',
'host_cc': None,
'host_cxx': None,
'host_libtool': None,
'host_lipo': None,
'host_target': targets.StdlibDeploymentTarget.host_target().name,
'host_test': False,
'install_prefix': targets.install_prefix(),
'install_symroot': None,
'install_destdir': None,
'ios': False,
'ios_all': False,
'legacy_impl': False,
'libdispatch_build_variant': 'Debug',
'libicu_build_variant': 'Debug',
'lit_args': '-sv',
'llbuild_assertions': True,
'lldb_assertions': True,
'lldb_build_variant': 'Debug',
'lldb_build_with_xcode': '1',
'llvm_assertions': True,
'llvm_build_variant': 'Debug',
'llvm_max_parallel_lto_link_jobs':
host.max_lto_link_job_counts()['llvm'],
'llvm_targets_to_build': 'X86;ARM;AArch64;PowerPC;SystemZ;Mips',
'long_test': False,
'lto_type': None,
'show_sdks': False,
'skip_build': False,
'stdlib_deployment_targets': None,
'stress_test': False,
'swift_analyze_code_coverage': defaults.SWIFT_ANALYZE_CODE_COVERAGE,
'swift_assertions': True,
'swift_build_variant': 'Debug',
'swift_compiler_version': None,
'swift_stdlib_assertions': True,
'swift_stdlib_build_variant': 'Debug',
'swift_tools_max_parallel_lto_link_jobs':
host.max_lto_link_job_counts()['swift'],
'swift_user_visible_version': defaults.SWIFT_USER_VISIBLE_VERSION,
'symbols_package': None,
'test': None,
'test_android_host': False,
'test_cygwin': False,
'test_freebsd': False,
'test_ios': False,
'test_ios_32bit_simulator': True,
'test_ios_host': False,
'test_ios_simulator': False,
'test_linux': False,
'test_optimize_for_size': None,
'test_optimized': None,
'test_osx': False,
'test_paths': [],
'test_tvos': False,
'test_tvos_host': False,
'test_tvos_simulator': False,
'test_watchos': False,
'test_watchos_host': False,
'test_watchos_simulator': False,
'test_indexstoredb': False,
'test_sourcekitlsp': False,
'tvos': False,
'tvos_all': False,
'validation_test': None,
'verbose_build': False,
'watchos': False,
'watchos_all': False
}
# -----------------------------------------------------------------------------
def _sanitize_option_string(option_string):
    """Convert an option string to its implicit argparse dest name.

    For example '--enable-asan' becomes 'enable_asan' and '-j' becomes 'j'.
    """
    if option_string.startswith('--'):
        return option_string[2:].replace('-', '_')
    if len(option_string) == 2 and option_string[0] == '-':
        return option_string[1]
    raise ValueError('invalid option_string format: ' + option_string)
class _BaseOption(object):
def __init__(self, option_string, dest=None, default=None):
if dest is None:
dest = _sanitize_option_string(option_string)
if default is None:
default = EXPECTED_DEFAULTS.get(dest, None)
self.option_string = option_string
self.dest = dest
self.default = default
def sanitized_string(self):
return _sanitize_option_string(self.option_string)
class HelpOption(_BaseOption):
"""Option that prints the help message and exits."""
pass
class SetOption(_BaseOption):
"""Option that accepts no arguments, setting the destination to a
hard-coded value or None.
"""
def __init__(self, *args, **kwargs):
self.value = kwargs.pop('value', None)
super(SetOption, self).__init__(*args, **kwargs)
class SetTrueOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to True
if parsed and defaulting to False otherwise.
"""
pass
class SetFalseOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to False
if parsed and defaulting to True otherwise.
"""
pass
class EnableOption(_BaseOption):
"""Option that sets the destination to True when parsed and False by default.
Can be toggled True or False with an optional bool argument.
"""
pass
class DisableOption(_BaseOption):
"""Option that sets the destination to False when parsed and True by default.
Can be toggled True or False with an optional bool argument, which is then
negated. Thus if an option is passed the value 'True' it will set the
destination to False and vice versa.
"""
pass
class ChoicesOption(_BaseOption):
"""Option that accepts an argument from a predifined list of choices."""
def __init__(self, *args, **kwargs):
self.choices = kwargs.pop('choices', None)
super(ChoicesOption, self).__init__(*args, **kwargs)
class IntOption(_BaseOption):
"""Option that accepts an int argument."""
pass
class StrOption(_BaseOption):
"""Option that accepts a str argument."""
pass
class PathOption(_BaseOption):
"""Option that accepts a path argument."""
pass
class AppendOption(_BaseOption):
"""Option that can be called more than once to append argument to internal
list.
"""
pass
class UnsupportedOption(_BaseOption):
"""Option that is not supported."""
pass
class IgnoreOption(_BaseOption):
"""Option that should be ignored when generating tests. Instead a test
should be written manually as the behavior cannot or should not be auto-
generated.
"""
pass
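# Illustrative sketch (an assumption, not part of the build-script test
# suite): the marker classes above only describe expected argparse behaviour.
# A harness could translate, e.g., a SetTrueOption into a real argparse
# action like this and then compare parsed defaults against EXPECTED_DEFAULTS.
def _example_register_set_true(parser, option):
    parser.add_argument(option.option_string,
                        dest=option.dest,
                        action='store_true',
                        default=bool(option.default))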
# -----------------------------------------------------------------------------
EXPECTED_OPTIONS = [
# Ignore the help options since they always call sys.exit(0)
HelpOption('-h', dest='help', default=argparse.SUPPRESS),
HelpOption('--help', dest='help', default=argparse.SUPPRESS),
SetOption('--debug', dest='build_variant', value='Debug'),
SetOption('--debug-cmark', dest='cmark_build_variant', value='Debug'),
SetOption('--debug-foundation',
dest='foundation_build_variant', value='Debug'),
SetOption('--debug-libdispatch',
dest='libdispatch_build_variant', value='Debug'),
SetOption('--debug-libicu', dest='libicu_build_variant', value='Debug'),
SetOption('--debug-lldb', dest='lldb_build_variant', value='Debug'),
SetOption('--lldb-build-with-xcode', dest='lldb_build_with_xcode',
value='1'),
SetOption('--lldb-build-with-cmake', dest='lldb_build_with_xcode',
value='0'),
SetOption('--debug-llvm', dest='llvm_build_variant', value='Debug'),
SetOption('--debug-swift', dest='swift_build_variant', value='Debug'),
SetOption('--debug-swift-stdlib',
dest='swift_stdlib_build_variant', value='Debug'),
SetOption('--eclipse',
dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('--make', dest='cmake_generator', value='Unix Makefiles'),
SetOption('--release', dest='build_variant', value='Release'),
SetOption('--release-debuginfo',
dest='build_variant', value='RelWithDebInfo'),
SetOption('--xcode', dest='cmake_generator', value='Xcode'),
SetOption('-R', dest='build_variant', value='Release'),
SetOption('-d', dest='build_variant', value='Debug'),
SetOption('-e', dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('-m', dest='cmake_generator', value='Unix Makefiles'),
SetOption('-r', dest='build_variant', value='RelWithDebInfo'),
SetOption('-x', dest='cmake_generator', value='Xcode'),
# FIXME: Convert these options to set_true actions
SetOption('--assertions', value=True),
SetOption('--cmark-assertions', value=True),
SetOption('--lldb-assertions', value=True),
SetOption('--llvm-assertions', value=True),
SetOption('--llbuild-assertions', value=True),
SetOption('--swift-assertions', value=True),
SetOption('--swift-stdlib-assertions', value=True),
SetOption('-T', dest='validation_test', value=True),
SetOption('-o', dest='test_optimized', value=True),
SetOption('-s', dest='test_optimize_for_size', value=True),
SetOption('-t', dest='test', value=True),
# FIXME: Convert these options to set_false actions
SetOption('--no-assertions', dest='assertions', value=False),
SetOption('--no-lldb-assertions', dest='lldb_assertions', value=False),
SetOption('--no-llvm-assertions', dest='llvm_assertions', value=False),
SetOption('--no-llbuild-assertions',
dest='llbuild_assertions', value=False),
SetOption('--no-swift-assertions', dest='swift_assertions', value=False),
SetOption('--no-swift-stdlib-assertions',
dest='swift_stdlib_assertions', value=False),
SetOption('--skip-ios', dest='ios', value=False),
SetOption('--skip-tvos', dest='tvos', value=False),
SetOption('--skip-watchos', dest='watchos', value=False),
SetTrueOption('--benchmark'),
SetTrueOption('--clean'),
SetTrueOption('--dry-run'),
SetTrueOption('--enable-sil-ownership'),
SetTrueOption('--disable-guaranteed-normal-arguments'),
SetTrueOption('--enable-stdlibcore-exclusivity-checking'),
SetTrueOption('--force-optimized-typechecker'),
SetTrueOption('--ios'),
SetTrueOption('--llbuild', dest='build_llbuild'),
SetTrueOption('--lldb', dest='build_lldb'),
SetTrueOption('--libcxx', dest='build_libcxx'),
SetTrueOption('--playgroundsupport', dest='build_playgroundsupport'),
SetTrueOption('--skip-build'),
SetTrueOption('--swiftpm', dest='build_swiftpm'),
SetTrueOption('--swiftsyntax', dest='build_swiftsyntax'),
SetTrueOption('--build-libparser-only', dest='build_libparser_only'),
SetTrueOption('--skstresstester', dest='build_skstresstester'),
SetTrueOption('--swiftevolve', dest='build_swiftevolve'),
SetTrueOption('-B', dest='benchmark'),
SetTrueOption('-S', dest='skip_build'),
SetTrueOption('-b', dest='build_llbuild'),
SetTrueOption('-c', dest='clean'),
SetTrueOption('-i', dest='ios'),
SetTrueOption('-l', dest='build_lldb'),
SetTrueOption('-n', dest='dry_run'),
SetTrueOption('-p', dest='build_swiftpm'),
SetTrueOption('--legacy-impl', dest='legacy_impl'),
EnableOption('--android'),
EnableOption('--build-external-benchmarks'),
EnableOption('--build-ninja'),
EnableOption('--build-runtime-with-host-compiler'),
EnableOption('--build-swift-dynamic-sdk-overlay'),
EnableOption('--build-swift-dynamic-stdlib'),
EnableOption('--build-swift-static-sdk-overlay'),
EnableOption('--build-swift-static-stdlib'),
EnableOption('--build-swift-stdlib-unittest-extra'),
EnableOption('--distcc'),
EnableOption('--enable-asan'),
EnableOption('--enable-lsan'),
EnableOption('--enable-sanitize-coverage'),
EnableOption('--enable-tsan'),
EnableOption('--enable-tsan-runtime'),
EnableOption('--enable-ubsan'),
EnableOption('--export-compile-commands'),
EnableOption('--foundation', dest='build_foundation'),
EnableOption('--host-test'),
EnableOption('--libdispatch', dest='build_libdispatch'),
EnableOption('--libicu', dest='build_libicu'),
EnableOption('--indexstore-db', dest='build_indexstoredb'),
EnableOption('--sourcekit-lsp', dest='build_sourcekitlsp'),
EnableOption('--long-test'),
EnableOption('--show-sdks'),
EnableOption('--stress-test'),
EnableOption('--test'),
EnableOption('--test-optimize-for-size'),
EnableOption('--test-optimized'),
EnableOption('--tvos'),
EnableOption('--validation-test'),
EnableOption('--verbose-build'),
EnableOption('--watchos'),
EnableOption('--xctest', dest='build_xctest'),
DisableOption('--skip-build-android', dest='build_android'),
DisableOption('--skip-build-benchmarks', dest='build_benchmarks'),
DisableOption('--skip-build-cygwin', dest='build_cygwin'),
DisableOption('--skip-build-freebsd', dest='build_freebsd'),
DisableOption('--skip-build-ios', dest='build_ios'),
DisableOption('--skip-build-ios-device', dest='build_ios_device'),
DisableOption('--skip-build-ios-simulator',
dest='build_ios_simulator'),
DisableOption('--skip-build-linux', dest='build_linux'),
DisableOption('--skip-build-osx', dest='build_osx'),
DisableOption('--skip-build-tvos', dest='build_tvos'),
DisableOption('--skip-build-tvos-device', dest='build_tvos_device'),
DisableOption('--skip-build-tvos-simulator',
dest='build_tvos_simulator'),
DisableOption('--skip-build-watchos', dest='build_watchos'),
DisableOption('--skip-build-watchos-device',
dest='build_watchos_device'),
DisableOption('--skip-build-watchos-simulator',
dest='build_watchos_simulator'),
DisableOption('--skip-test-android-host', dest='test_android_host'),
DisableOption('--skip-test-cygwin', dest='test_cygwin'),
DisableOption('--skip-test-freebsd', dest='test_freebsd'),
DisableOption('--skip-test-ios', dest='test_ios'),
DisableOption('--skip-test-ios-32bit-simulator',
dest='test_ios_32bit_simulator'),
DisableOption('--skip-test-ios-host', dest='test_ios_host'),
DisableOption('--skip-test-ios-simulator', dest='test_ios_simulator'),
DisableOption('--skip-test-linux', dest='test_linux'),
DisableOption('--skip-test-osx', dest='test_osx'),
DisableOption('--skip-test-tvos', dest='test_tvos'),
DisableOption('--skip-test-tvos-host', dest='test_tvos_host'),
DisableOption('--skip-test-tvos-simulator',
dest='test_tvos_simulator'),
DisableOption('--skip-test-watchos', dest='test_watchos'),
DisableOption('--skip-test-watchos-host', dest='test_watchos_host'),
DisableOption('--skip-test-watchos-simulator',
dest='test_watchos_simulator'),
DisableOption('--skip-test-indexstore-db', dest='test_indexstoredb'),
DisableOption('--skip-test-sourcekit-lsp', dest='test_sourcekitlsp'),
ChoicesOption('--android-ndk-gcc-version',
choices=['4.8', '4.9']),
ChoicesOption('--compiler-vendor',
choices=['none', 'apple']),
ChoicesOption('--swift-analyze-code-coverage',
choices=['false', 'not-merged', 'merged']),
ChoicesOption('--android-arch',
choices=['armv7', 'aarch64']),
StrOption('--android-api-level'),
StrOption('--build-args'),
StrOption('--build-stdlib-deployment-targets'),
StrOption('--darwin-deployment-version-ios'),
StrOption('--darwin-deployment-version-osx'),
StrOption('--darwin-deployment-version-tvos'),
StrOption('--darwin-deployment-version-watchos'),
StrOption('--darwin-xcrun-toolchain'),
StrOption('--host-target'),
StrOption('--lit-args'),
StrOption('--llvm-targets-to-build'),
PathOption('--android-deploy-device-path'),
PathOption('--android-icu-i18n'),
PathOption('--android-icu-i18n-include'),
PathOption('--android-icu-uc'),
PathOption('--android-icu-uc-include'),
PathOption('--android-icu-data'),
PathOption('--android-ndk'),
PathOption('--build-subdir'),
PathOption('--clang-profile-instr-use'),
PathOption('--cmake'),
PathOption('--coverage-db'),
PathOption('--host-cc'),
PathOption('--host-cxx'),
PathOption('--host-libtool'),
PathOption('--host-lipo'),
PathOption('--install-prefix'),
PathOption('--install-symroot'),
PathOption('--install-destdir'),
PathOption('--symbols-package'),
IntOption('--benchmark-num-o-iterations'),
IntOption('--benchmark-num-onone-iterations'),
IntOption('--jobs', dest='build_jobs'),
IntOption('--llvm-max-parallel-lto-link-jobs'),
IntOption('--swift-tools-max-parallel-lto-link-jobs'),
IntOption('-j', dest='build_jobs'),
AppendOption('--cross-compile-hosts'),
AppendOption('--extra-cmake-options'),
AppendOption('--extra-swift-args'),
AppendOption('--stdlib-deployment-targets'),
AppendOption('--test-paths'),
UnsupportedOption('--build-jobs'),
UnsupportedOption('--common-cmake-options'),
UnsupportedOption('--only-execute'),
UnsupportedOption('--skip-test-optimize-for-size'),
UnsupportedOption('--skip-test-optimized'),
# NOTE: LTO flag is a special case that acts both as an option and has
# valid choices
SetOption('--lto', dest='lto_type'),
ChoicesOption('--lto', dest='lto_type', choices=['thin', 'full']),
# NOTE: We'll need to manually test the behavior of these since they
# validate compiler version strings.
IgnoreOption('--clang-compiler-version'),
IgnoreOption('--clang-user-visible-version'),
IgnoreOption('--swift-compiler-version'),
IgnoreOption('--swift-user-visible-version'),
# TODO: Migrate to unavailable options once new parser is in place
IgnoreOption('-I'),
IgnoreOption('--ios-all'),
IgnoreOption('--tvos-all'),
IgnoreOption('--watchos-all'),
]
|
|
# -*- coding: utf-8 -*-
import serial
import commands
import RPi.GPIO as GPIO
import time
import warnings
def moduleCall(command, exString, row, timeout):
    """Send an AT command over the first available /dev/ttyACM* port.

    command  -- AT command string to write (a trailing CR is appended)
    exString -- response line that terminates a successful exchange (e.g. "OK")
    row      -- 1-based index of the response line returned as the result
    timeout  -- overall timeout in seconds; returns "TIMEOUT" when exceeded
    """
    port = None
for i in range(0, 9):
try:
port = serial.Serial("/dev/ttyACM%i" % i, baudrate=115200, timeout=1)
break
except:
pass
if port is None:
return "SERIAL_ERROR"
start= time.time()
at = command
port.write(at+"\r")
data = ""
flag = 0
cnt = 0
result = ""
while True:
ctime= time.time()-start
if ctime > timeout :
result="TIMEOUT"
break
if port.inWaiting() > 0 :
tmp = port.read(1)
if tmp == "\r":
if data == at:
data = ""
continue
elif tmp == "\n":
cnt = cnt + 1
flag = 1
else:
data = data+tmp
if flag == 1:
if cnt == row:
result = data
if data == exString:
#print "OK exit"
data = ""
flag = 0
break
elif data == "REJECT" or data == "ERROR":
result = data
data = ""
flag = 0
break
else:
data = ""
flag = 0
port.close()
return result
def getVersion():
ret= moduleCall("at+gmr", "OK", 1, 1.0)
if ret.startswith("+GMR: "):
return ret.split(" ")[1]
else:
return ret
def getDatetime():
return moduleCall("at$31?", "OK", 1, 1.0)
def getPacketstat():
return moduleCall("at$36?", "OK", 1, 1.0)
def getSimstat():
return moduleCall("at$20?", "OK", 1, 1.0)
def getTelno():
return moduleCall("at$21?", "OK", 1, 1.0)
def getIccid():
return moduleCall("at$19?", "OK", 1, 1.0)
def getArea():
return moduleCall("at+cad?", "OK", 1, 1.0)
def getAntenna():
return moduleCall("at$30=0", "OK", 1, 1.0)
def getAntena():
    # Deprecated misspelling kept for backwards compatibility.
    warnings.warn('getAntena() is deprecated; use getAntenna() instead',
                  DeprecationWarning)
    return getAntenna()
def getGpsid():
return moduleCall("at@74?", "OK", 1, 1.0)
def getGpspass():
return moduleCall("at@75?", "OK", 1, 1.0)
def getSimSelected():
return moduleCall("at$18?", "OK", 1, 1.0)
def setIpaddress(ip):
    if not ip.startswith("192.168.225."):
return "ERROR"
i= int(ip.split(".")[3])
if i < 8 or 224 < i:
return "ERROR"
commands.getoutput("ifconfig usb0 "+ip)
commands.getoutput("route add -net 0.0.0.0 gw 192.168.225.1 netmask 0.0.0.0 usb0")
commands.getoutput("route del -net 0.0.0.0 eth0")
return "OK"
def setDmzip(ip):
return moduleCall("at+dmzip="+ip, "OK", 2, 1.0)
def setSelectsim():
return moduleCall("at$18=2", "OK", 2, 1.0)
def unsetSelectsim():
return moduleCall("at$18=1", "OK", 2, 1.0)
def setGpsid(id):
return moduleCall("at@74="+id, "OK", 2, 1.0)
def setGpspass(p,mode):
return moduleCall("at@75="+p+","+mode, "OK", 2, 1.0)
def reqConnect(apn=None,id=None,p=None):
if apn and id and p:
ret= moduleCall("at$52=2", "OK", 2, 1.0)
if ret != "OK":
return ret
ret= moduleCall("at+aapn="+apn, "OK", 2, 1.0)
if ret != "OK":
return ret
ret= moduleCall("at+ccallid="+id+","+p+",1,0", "OK", 2, 1.0)
if ret != "OK":
return ret
ret= moduleCall("at+ccall=1", "OK", 2, 5.0)
if ret != "OK":
return ret
return "OK"
def reqDisconnect():
return moduleCall("at+ccall=0", "OK", 2, 5.0)
def reqOta():
return moduleCall("at@30", "OK", 2, 0.5)
def reqGpsStart(id,p,timeout):
ret= moduleCall("at@74="+id, "OK", 2, 1.0)
if ret != "OK":
return ret
ret= moduleCall("at@75="+p+",0", "OK", 2, 1.0)
if ret != "OK":
return ret
ret= moduleCall("at@72", "OK", 2, 1.0)
if ret != "OK":
return ret
return moduleCall("", "GPSOK", 1, timeout)
def reqGpsStop():
return moduleCall("at@73", "OK", 2, 1.0)
def reqInit():
return moduleCall("at&f0", "OK", 2, 2.0)
def reqWrite():
return moduleCall("at&w0", "OK", 2, 1.0)
def reqVbusOn():
#commands.getoutput("echo 5 > /sys/class/gpio/export")
#commands.getoutput("echo out > /sys/class/gpio/gpio5/direction")
commands.getoutput("echo 1 > /sys/class/gpio/gpio5/value")
return "OK"
def reqVbusOff():
#commands.getoutput("echo 5 > /sys/class/gpio/export")
#commands.getoutput("echo out > /sys/class/gpio/gpio5/direction")
commands.getoutput("echo 0 > /sys/class/gpio/gpio5/value")
return "OK"
def reqPowerOn():
commands.getoutput("echo 0 > /sys/class/gpio/gpio12/value")
commands.getoutput("echo 1 > /sys/class/gpio/gpio12/value")
time.sleep(2.2)
commands.getoutput("echo 0 > /sys/class/gpio/gpio12/value")
return "OK"
def reqPowerOff():
commands.getoutput("echo 0 > /sys/class/gpio/gpio12/value")
commands.getoutput("echo 1 > /sys/class/gpio/gpio12/value")
time.sleep(2.5)
commands.getoutput("echo 0 > /sys/class/gpio/gpio12/value")
return "OK"
def getPsHold():
return commands.getoutput("cat /sys/class/gpio/gpio20/value")
def getRi():
return commands.getoutput("cat /sys/class/gpio/gpio16/value")
def getAreaInd():
return commands.getoutput("cat /sys/class/gpio/gpio6/value")
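# Illustrative usage (a minimal sketch, not part of this module): power the
# modem, wait for it to enumerate as /dev/ttyACM*, then query it over the
# serial AT interface. The 10-second settle time is an assumption, not a
# documented requirement of the hardware.
def _example_bring_up_and_query():
    reqVbusOn()
    reqPowerOn()
    time.sleep(10)           # allow the modem to boot and enumerate (assumed)
    print(getVersion())      # firmware version via "at+gmr"
    print(getDatetime())     # network date/time via "at$31?"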
|
|
"""
******** Models for test_data.py ***********
The following classes are for testing basic data marshalling, including
NULL values, where allowed.
The basic idea is to have a model for each Django data type.
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .base import BaseModel
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True)
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, models.SET_NULL, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, models.CASCADE, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', models.CASCADE, null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, models.SET_NULL, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData, models.CASCADE)
right = models.ForeignKey(Anchor, models.CASCADE)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
# class DatePKData(models.Model):
# data = models.DateField(primary_key=True)
# class DateTimePKData(models.Model):
# data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True)
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
# This is just a Boolean field with null=True, and we can't test a PK value of NULL.
# class NullBooleanPKData(models.Model):
# data = models.NullBooleanField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class UUIDData(models.Model):
data = models.UUIDField(primary_key=True)
class FKToUUID(models.Model):
data = models.ForeignKey(UUIDData, models.CASCADE)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
A user-defined save() method isn't called when objects are deserialized
(#4459).
"""
self.data = 666
super(ModifyingSaveData, self).save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True)
child_data = models.IntegerField()
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
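# Illustrative usage (a minimal sketch, not part of the test suite): the
# models above exist so every field type can be round-tripped through the
# serialization framework. A typical check, run inside a configured Django
# test environment (assumed), looks like this:
def _example_roundtrip_chardata():
    from django.core import serializers
    obj = CharData(data="hello")
    payload = serializers.serialize("json", [obj])
    restored = next(serializers.deserialize("json", payload)).object
    assert restored.data == obj.data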
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import testtools
from neutron.agent.common import utils # noqa
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff promiscuity 0',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'5: foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'6: foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state '
'UP qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff '
'promiscuity 0',
'7: foo:foo@foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'8: foo@foo:foo: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0',
'9: bar.9@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 9 <REORDER_HDR>',
'10: bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc '
' noqueue master brq0b24798c-07 state UP mode DEFAULT'
'\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 10 <REORDER_HDR>',
'11: bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 11 <REORDER_HDR>',
'12: bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq '
'state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan id 12 <REORDER_HDR>',
'13: bar:bar@bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1q id 13 <REORDER_HDR>',
'14: bar@bar:bar@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 '
'qdisc mq state UP qlen 1000'
'\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff promiscuity 0'
'\ vlan protocol 802.1Q id 14 <REORDER_HDR>']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::3023:39ff:febc:22ae/64 scope link tentative
valid_lft forever preferred_lft forever
inet6 fe80::3023:39ff:febc:22af/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
GATEWAY_SAMPLE5 = ("""
default via 192.168.99.1 proto static
""")
GATEWAY_SAMPLE6 = ("""
default via 192.168.99.1 proto static metric 100
""")
IPv6_GATEWAY_SAMPLE1 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE2 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 100
""")
IPv6_GATEWAY_SAMPLE3 = ("""
2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE4 = ("""
default via fe80::dfcc:aaff:feb9:76ce
""")
IPv6_GATEWAY_SAMPLE5 = ("""
default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
RULE_V4_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
101: from 192.168.45.100 lookup 2
""")
RULE_V6_SAMPLE = ("""
0: from all lookup local
32766: from all lookup main
32767: from all lookup default
201: from 2001:db8::1 lookup 3
""")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.common.utils.execute')
self.execute = self.execute_p.start()
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute(['o'], 'link', ('list',),
run_as_root=True)
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase()
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_run_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase(namespace='ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
run_as_root=True,
log_fail_as_error=True)
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
@mock.patch('os.path.islink')
@mock.patch('os.listdir', return_value=['lo'])
def test_get_devices(self, mocked_listdir, mocked_islink):
retval = ip_lib.IPWrapper().get_devices()
mocked_islink.assert_called_once_with('/sys/class/net/lo')
self.assertEqual(retval, [ip_lib.IPDevice('lo')])
@mock.patch('neutron.agent.common.utils.execute')
def test_get_devices_namespaces(self, mocked_execute):
fake_str = mock.Mock()
fake_str.split.return_value = ['lo']
mocked_execute.return_value = fake_str
retval = ip_lib.IPWrapper(namespace='foo').get_devices()
mocked_execute.assert_called_once_with(
['ip', 'netns', 'exec', 'foo', 'find', '/sys/class/net',
'-maxdepth', '1', '-type', 'l', '-printf', '%f '],
run_as_root=True, log_fail_as_error=True)
self.assertTrue(fake_str.split.called)
self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')])
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces()
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with([], 'netns', ('list',))
def test_add_tuntap(self):
ip_lib.IPWrapper().add_tuntap('tap0')
self.execute.assert_called_once_with([], 'tuntap',
('add', 'tap0', 'mode', 'tap'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth(self):
ip_lib.IPWrapper().add_veth('tap0', 'tap1')
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_del_veth(self):
ip_lib.IPWrapper().del_veth('fpr-1234')
self.execute.assert_called_once_with([], 'link',
('del', 'fpr-1234'),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with([], 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_get_device(self):
dev = ip_lib.IPWrapper(namespace='ns').device('eth0')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper()
with mock.patch.object(ip.netns, 'exists') as ns_exists:
with mock.patch('neutron.agent.common.utils.execute'):
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'),
run_as_root=True, namespace=None,
log_fail_as_error=True)])
ip_dev.assert_has_calls([mock.call('lo', namespace='ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper().ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper(namespace='ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_vxlan_valid_port_length(self):
retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0',
group='group0',
dev='dev0', ttl='ttl0',
tos='tos0',
local='local0', proxy=True,
port=('1', '2'))
self.assertIsInstance(retval, ip_lib.IPDevice)
self.assertEqual(retval.name, 'vxlan0')
self.execute.assert_called_once_with([], 'link',
['add', 'vxlan0', 'type',
'vxlan', 'id', 'vni0', 'group',
'group0', 'dev', 'dev0',
'ttl', 'ttl0', 'tos', 'tos0',
'local', 'local0', 'proxy',
'port', '1', '2'],
run_as_root=True, namespace=None,
log_fail_as_error=True)
def test_add_vxlan_invalid_port_length(self):
wrapper = ip_lib.IPWrapper()
self.assertRaises(exceptions.NetworkVxlanPortRangeError,
wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0',
dev='dev0', ttl='ttl0', tos='tos0',
local='local0', proxy=True,
port=('1', '2', '3'))
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper().add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
dev2 = ip_lib.IPDevice('tap0', namespace='ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', namespace='ns1')
self.assertIsNotNone(dev1)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run([], ('link', 'show'))
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run(['o'], ('link'))
self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))])
def test_as_root_namespace_false(self):
self.ip_cmd._as_root([], ('link'))
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=False)])
def test_as_root_namespace_true(self):
self.ip_cmd._as_root([], ('link'), use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root([],
'foo',
('link'),
use_root_namespace=True)])
def test_as_root_namespace_true_with_options(self):
self.ip_cmd._as_root('o', 'link', use_root_namespace=True)
self.ip.assert_has_calls(
[mock.call._as_root('o',
'foo',
('link'),
use_root_namespace=True)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRuleCommand, self).setUp()
self.parent._as_root.return_value = ''
self.command = 'rule'
self.rule_cmd = ip_lib.IpRuleCommand(self.parent)
def _test_add_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
self._assert_sudo([ip_version], (['show']))
self._assert_sudo([ip_version], ('add', 'from', ip,
'table', table, 'priority', priority))
def _test_add_rule_exists(self, ip, table, priority, output):
self.parent._as_root.return_value = output
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.add(ip, table, priority)
self._assert_sudo([ip_version], (['show']))
def _test_delete_rule(self, ip, table, priority):
ip_version = netaddr.IPNetwork(ip).version
self.rule_cmd.delete(ip, table, priority)
self._assert_sudo([ip_version],
('del', 'table', table,
'priority', priority))
def test_add_rule_v4(self):
self._test_add_rule('192.168.45.100', 2, 100)
def test_add_rule_v4_exists(self):
self._test_add_rule_exists('192.168.45.100', 2, 101, RULE_V4_SAMPLE)
def test_add_rule_v6(self):
self._test_add_rule('2001:db8::1', 3, 200)
def test_add_rule_v6_exists(self):
self._test_add_rule_exists('2001:db8::1', 3, 201, RULE_V6_SAMPLE)
def test_delete_rule_v4(self):
self._test_delete_rule('192.168.45.100', 2, 100)
def test_delete_rule_v6(self):
self._test_delete_rule('2001:db8::1', 3, 200)
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call(['o'], ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add('192.168.45.100/24')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'global',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_add_address_scoped(self):
self.addr_cmd.add('192.168.45.100/24', scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24',
'scope', 'link',
'dev', 'tap0',
'brd', '192.168.45.255'))
def test_del_address(self):
self.addr_cmd.delete('192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush(6)
self._assert_sudo([6], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(scope='global', dadfailed=False, tentative=False,
dynamic=False, cidr='172.16.77.240/24'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64'),
dict(scope='link', dadfailed=False, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22ae/64'),
dict(scope='link', dadfailed=True, tentative=True,
dynamic=False, cidr='fe80::3023:39ff:febc:22af/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64'),
dict(scope='global', dadfailed=False, tentative=False,
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64'),
dict(scope='link', dadfailed=False, tentative=False,
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(expected, self.addr_cmd.list())
self._assert_call([], ('show', 'tap0'))
def test_wait_until_address_ready(self):
self.parent._run.return_value = ADDR_SAMPLE
        # this address is neither tentative nor failed, so it should return
self.assertIsNone(self.addr_cmd.wait_until_address_ready(
'2001:470:9:1224:fd91:272:581e:3a32'))
def test_wait_until_address_ready_non_existent_address(self):
self.addr_cmd.list = mock.Mock(return_value=[])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready('abcd::1234')
def test_wait_until_address_ready_timeout(self):
tentative_address = 'fe80::3023:39ff:febc:22ae'
self.addr_cmd.list = mock.Mock(return_value=[
dict(scope='link', dadfailed=False, tentative=True, dynamic=False,
cidr=tentative_address + '/64')])
with testtools.ExpectedException(ip_lib.AddressNotReady):
self.addr_cmd.wait_until_address_ready(tentative_address,
wait_time=1)
def test_list_filtered(self):
expected = [
dict(scope='global', tentative=False, dadfailed=False,
dynamic=False, cidr='172.16.77.240/24')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
self.ip_version = 4
self.table = 14
self.metric = 100
self.cidr = '192.168.45.100/24'
self.ip = '10.0.0.1'
self.gateway = '192.168.45.100'
self.test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}},
{'sample': GATEWAY_SAMPLE5,
'expected': {'gateway': '192.168.99.1'}},
{'sample': GATEWAY_SAMPLE6,
'expected': {'gateway': '192.168.99.1',
'metric': 100}}]
def test_add_gateway(self):
self.route_cmd.add_gateway(self.gateway, self.metric, self.table)
self._assert_sudo([self.ip_version],
('replace', 'default',
'via', self.gateway,
'metric', self.metric,
'dev', self.parent.name,
'table', self.table))
def test_del_gateway(self):
self.route_cmd.delete_gateway(self.gateway, table=self.table)
self._assert_sudo([self.ip_version],
('del', 'default',
'via', self.gateway,
'dev', self.parent.name,
'table', self.table))
def test_get_gateway(self):
for test_case in self.test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific use case
        # for pullup_route, hence skipping. Revisit in the future if required.
if self.ip_version == 6:
return
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
self._assert_sudo([4], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([4], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
        # NOTE(brian-haley) Currently we do not have any IPv6-specific use case
        # for pullup_route, hence skipping. Revisit in the future if required.
if self.ip_version == 6:
return
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10', ip_version=4)
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
def test_add_route(self):
self.route_cmd.add_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('replace', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
def test_delete_route(self):
self.route_cmd.delete_route(self.cidr, self.ip, self.table)
self._assert_sudo([self.ip_version],
('del', self.cidr,
'via', self.ip,
'dev', self.parent.name,
'table', self.table))
class TestIPv6IpRouteCommand(TestIpRouteCommand):
def setUp(self):
super(TestIPv6IpRouteCommand, self).setUp()
self.ip_version = 6
self.cidr = '2001:db8::/64'
self.ip = '2001:db8::100'
self.gateway = '2001:db8::1'
self.test_cases = [{'sample': IPv6_GATEWAY_SAMPLE1,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE2,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 100}},
{'sample': IPv6_GATEWAY_SAMPLE3,
'expected': None},
{'sample': IPv6_GATEWAY_SAMPLE4,
'expected':
{'gateway': 'fe80::dfcc:aaff:feb9:76ce'}},
{'sample': IPv6_GATEWAY_SAMPLE5,
'expected':
{'gateway': '2001:470:9:1224:4508:b885:5fb:740b',
'metric': 1024}}]
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), use_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns',
'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_delete_namespace(self):
with mock.patch('neutron.agent.common.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), use_root_namespace=True)
def test_namespace_exists_use_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=True)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertTrue(
netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=True,
log_fail_as_error=True)
    def test_namespace_does_not_exist_no_helper(self):
self.config(group='AGENT', use_helper_for_ns_read=False)
retval = '\n'.join(NETNS_SAMPLE)
# need another instance to avoid mocking
netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase())
with mock.patch('neutron.agent.common.utils.execute') as execute:
execute.return_value = retval
self.assertFalse(
netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
execute.assert_called_once_with(['ip', '-o', 'netns', 'list'],
run_as_root=False,
log_fail_as_error=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
run_as_root=True,
check_exit_code=True,
extra_ok_codes=None)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.common.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env'] +
['%s=%s' % (k, v) for k, v in env.items()] +
['ip', 'link', 'list'],
run_as_root=True, check_exit_code=True, extra_ok_codes=None)
def test_execute_nosudo_with_no_namespace(self):
with mock.patch('neutron.agent.common.utils.execute') as execute:
self.parent.namespace = None
self.netns_cmd.execute(['test'])
execute.assert_called_once_with(['test'],
check_exit_code=True,
extra_ok_codes=None,
run_as_root=False)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with(['o'], 'link', ('show', 'eth0'),
log_fail_as_error=False)
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
def test_ensure_device_is_ready(self):
ip_lib_mock = mock.Mock()
with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock):
self.assertTrue(ip_lib.ensure_device_is_ready("eth0"))
self.assertTrue(ip_lib_mock.link.set_up.called)
ip_lib_mock.reset_mock()
            # device doesn't exist
ip_lib_mock.link.set_up.side_effect = RuntimeError
self.assertFalse(ip_lib.ensure_device_is_ready("eth0"))
class TestIpNeighCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNeighCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'neigh'
self.neigh_cmd = ip_lib.IpNeighCommand(self.parent)
def test_add_entry(self):
self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('replace', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'nud', 'permanent',
'dev', 'tap0'))
def test_delete_entry(self):
self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd')
self._assert_sudo([4],
('del', '192.168.45.100',
'lladdr', 'cc:dd:ee:ff:ab:cd',
'dev', 'tap0'))
class TestArpPing(TestIPCmdBase):
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch('eventlet.spawn_n')
def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper):
spawn_n.side_effect = lambda f: f()
ARPING_COUNT = 3
address = '20.0.0.1'
config = mock.Mock()
config.send_arp_for_ha = ARPING_COUNT
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
address,
config)
self.assertTrue(spawn_n.called)
mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name)
ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name)
# Just test that arping is called with the right arguments
arping_cmd = ['arping', '-A',
'-I', mock.sentinel.iface_name,
'-c', ARPING_COUNT,
'-w', mock.ANY,
address]
ip_wrapper.netns.execute.assert_any_call(arping_cmd,
check_exit_code=True)
@mock.patch('eventlet.spawn_n')
def test_no_ipv6_addr_notif(self, spawn_n):
ipv6_addr = 'fd00::1'
config = mock.Mock()
config.send_arp_for_ha = 3
ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name,
mock.sentinel.iface_name,
ipv6_addr,
config)
self.assertFalse(spawn_n.called)
class TestAddNamespaceToCmd(base.BaseTestCase):
def test_add_namespace_to_cmd_with_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd,
ip_lib.add_namespace_to_cmd(cmd, 'tmp'))
def test_add_namespace_to_cmd_without_namespace(self):
cmd = ['ping', '8.8.8.8']
self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None))
|
|
""" test the label propagation module """
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn.semi_supervised import _label_propagation as label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {"kernel": "rbf"}),
(label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelPropagation,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
(label_propagation.LabelSpreading, {"kernel": "rbf"}),
(label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelSpreading,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
]
def test_fit_transduction():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2] == 1
def test_distribution():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters["kernel"] == "knn":
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(
clf.predict_proba([[1.0, 0.0]]), np.array([[1.0, 0.0]]), 2
)
else:
assert_array_almost_equal(
np.asarray(clf.label_distributions_[2]), np.array([0.5, 0.5]), 2
)
def test_predict():
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(
clf.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]])
)
def test_label_spreading_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
clf = label_propagation.LabelSpreading().fit(X, y)
# adopting notation from Zhou et al (2004):
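    # Closed form being checked below: F* = (I - alpha * S)^{-1} Y, with each
    # row renormalized to sum to 1; label_distributions_ should match it.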
S = clf._build_graph()
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
Y = Y[:, :-1]
for alpha in [0.1, 0.3, 0.5, 0.7, 0.9]:
expected = np.dot(np.linalg.inv(np.eye(len(S)) - alpha * S), Y)
expected /= expected.sum(axis=1)[:, np.newaxis]
clf = label_propagation.LabelSpreading(max_iter=10000, alpha=alpha)
clf.fit(X, y)
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_label_propagation_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
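    # Closed form being checked below: Y_u = (I - T_uu)^{-1} T_ul Y_l for the
    # unlabelled rows, with rows of the result renormalized to sum to 1.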
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_valid_alpha():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
for alpha in [-0.1, 0, 1, 1.1, None]:
with pytest.raises(ValueError):
label_propagation.LabelSpreading(alpha=alpha).fit(X, y)
def test_convergence_speed():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000)
mdl.fit(X, y)
# this should converge quickly:
assert mdl.n_iter_ < 10
assert_array_equal(mdl.predict(X), [0, 1, 1])
def test_convergence_warning():
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=1)
warn_msg = "max_iter=1 was reached without convergence."
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=1)
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelSpreading(kernel="rbf", max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
mdl = label_propagation.LabelPropagation(kernel="rbf", max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
@pytest.mark.parametrize(
"LabelPropagationCls",
[label_propagation.LabelSpreading, label_propagation.LabelPropagation],
)
def test_label_propagation_non_zero_normalizer(LabelPropagationCls):
# check that we don't divide by zero in case of null normalizer
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/15946
# https://github.com/scikit-learn/scikit-learn/issues/9292
X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]])
y = np.array([0, 1, -1, -1])
mdl = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
def test_predict_sparse_callable_kernel():
# This is a non-regression test for #15866
# Custom sparse kernel (top-K RBF)
def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
nn = NearestNeighbors(n_neighbors=10, metric="euclidean", n_jobs=-1)
nn.fit(X)
W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma
np.exp(W.data, out=W.data)
assert issparse(W)
return W.T
n_classes = 4
n_samples = 500
n_test = 10
X, y = make_classification(
n_classes=n_classes,
n_samples=n_samples,
n_features=20,
n_informative=20,
n_redundant=0,
n_repeated=0,
random_state=0,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=0
)
model = label_propagation.LabelSpreading(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
model = label_propagation.LabelPropagation(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
|
|
import logging
import os
import json
from multiprocessing import Process, Queue, Lock
import numpy as np
from PyMaSC.core.mappability import MappableLengthCalculator
from PyMaSC.utils.progress import ProgressHook, MultiLineProgressManager
from PyMaSC.utils.compatible import tostr, xrange
from PyMaSC.utils.output import prepare_outdir
from PyMaSC.utils.calc import exec_worker_pool
logger = logging.getLogger(__name__)
class BWIOError(IOError):
pass
class JSONIOError(IOError):
pass
class NeedUpdate(Exception):
pass
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.long, np.float, np.float_)):
return float(obj)
elif isinstance(obj, (np.uint, np.int32, np.int64)):
return int(obj)
else:
            return super(NumpyEncoder, self).default(obj)
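# Hedged usage sketch (the value below is illustrative, not part of this
# module): NumpyEncoder lets json.dumps handle NumPy scalars that the stdlib
# encoder rejects, e.g.
#     json.dumps({"count": np.int64(3)}, cls=NumpyEncoder)  # -> '{"count": 3}'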
class MappabilityHandler(MappableLengthCalculator):
@staticmethod
def calc_mappable_len_required_shift_size(readlen, max_shift):
return max_shift - readlen + 1 if max_shift > 2*readlen - 1 else readlen
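    # Worked example of the rule above (numbers are illustrative only): with
    # readlen=36 and max_shift=300, 300 > 2*36 - 1, so the required shift is
    # 300 - 36 + 1 = 265; with max_shift=50 the condition fails and the
    # required shift is simply readlen, i.e. 36.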
def __init__(self, path, max_shift=0, readlen=0, map_path=None, nworker=1):
max_shift = self.calc_mappable_len_required_shift_size(readlen, max_shift)
self.nworker = nworker
if not os.access(path, os.R_OK):
reason = "file is unreadable." if os.path.isfile(path) else "no such file."
logger.critical("Failed to open '{}': {}".format(path, reason))
raise BWIOError
super(MappabilityHandler, self).__init__(path, max_shift)
self.close()
self._progress.disable_bar()
self.need_save_stats = True
if map_path:
self.map_path = map_path
else:
self.map_path = os.path.splitext(path)[0] + "_mappability.json"
if not os.path.exists(self.map_path):
self._check_saving_directory_is_writable()
logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
elif not os.path.isfile(self.map_path):
logger.critical("Specified path is not file: '{}'".format(self.map_path))
raise JSONIOError
elif not os.access(self.map_path, os.R_OK):
logger.error("Failed to read '{}'".format(self.map_path))
else:
self._try_load_mappability_stats()
if self.need_save_stats:
self._check_stats_is_overwritable()
logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
else:
logger.info("Use mappability stats read from '{}'".format(self.map_path))
def _check_saving_directory_is_writable(self):
dirname = os.path.dirname(self.map_path)
dirname = dirname if dirname else '.'
if not prepare_outdir(dirname, logger):
raise JSONIOError
def _try_load_mappability_stats(self):
try:
stats = self._read_mappability_stats()
except IOError as e:
logger.error("Failed to read '{}'".format(self.map_path))
logger.error("[Errno {}] {}".format(e.errno, e.message))
except (TypeError, OverflowError, ValueError, KeyError, IndexError) as e:
logger.error("Failed to load json file: '{}'".format(self.map_path))
except NeedUpdate:
pass
else:
self._load_mappability_stats(stats)
def _read_mappability_stats(self):
with open(self.map_path) as f:
stats = json.load(f)
for k in ("max_shift", "__whole__", "references"):
if k not in stats:
logger.error("Mandatory key '{}' not found.".format(k))
raise KeyError(k)
if stats["max_shift"] < self.max_shift:
logger.info("Specified shift length longer than former analysis. The stats will be updated.")
raise NeedUpdate
if stats["max_shift"] != len(stats["__whole__"]) - 1:
logger.error("Max shift length for whole genome unmatched.")
raise IndexError
for ref in self.chromsizes:
if ref not in stats["references"]:
logger.error("Reference '{}' not found.".format(ref))
raise KeyError(ref)
if stats["max_shift"] != len(stats["references"][ref]) - 1:
logger.error("Max shift length for 'ref' unmatched.".format(ref))
raise IndexError
return stats
def _load_mappability_stats(self, stats):
self.mappable_len = stats["__whole__"][:self.max_shift + 1]
self.chrom2mappable_len = {ref: b[:self.max_shift + 1] for ref, b in stats["references"].items()}
self.chrom2is_called = {ref: True for ref in self.chromsizes}
self.is_called = True
self.need_save_stats = False
def _check_stats_is_overwritable(self):
if not os.access(self.map_path, os.W_OK):
logger.critical("Failed to overwrite '{}'".format(self.map_path))
raise JSONIOError
else:
logger.warning("Existing file '{}' will be overwritten.".format(self.map_path))
def save_mappability_stats(self):
if not self.need_save_stats:
return logger.info("Mappability stats updating is not required.")
logger.info("Save mappable length to '{}'".format(self.map_path))
try:
with open(self.map_path, 'w') as f:
json.dump({
"max_shift": self.max_shift,
"__whole__": self.mappable_len,
"references": self.chrom2mappable_len
}, f, indent=4, sort_keys=True, cls=NumpyEncoder)
except IOError as e:
logger.error("Faild to output: {}\n[Errno {}] {}".format(
e.filename, e.errno, e.message))
self.need_save_stats = False
def calc_mappability(self):
target_chroms = [tostr(c) for c, b in self.chrom2is_called.items() if b is False]
if not target_chroms:
return self._sumup_mappability()
order_queue = Queue()
report_queue = Queue()
logger_lock = Lock()
progress = MultiLineProgressManager()
workers = [MappabilityCalcWorker(self.path, self.max_shift, order_queue, report_queue, logger_lock)
for _ in range(min(self.nworker, len(target_chroms)))]
with exec_worker_pool(workers, target_chroms, order_queue):
while not self.is_called:
chrom, obj = report_queue.get()
if chrom is None: # update progress
chrom, body = obj
with logger_lock:
progress.update(chrom, body)
else:
length = obj
self.chrom2mappable_len[chrom] = tuple(length)
self.chrom2is_called[chrom] = True
if all(self.chrom2is_called.values()):
self.is_called = True
with logger_lock:
progress.erase(chrom)
progress.clean()
self._sumup_mappability()
def _sumup_mappability(self):
for length in self.chrom2mappable_len.values():
for i in xrange(self.max_shift + 1):
self.mappable_len[i] += length[i]
class MappabilityCalcWorker(Process):
def __init__(self, path, max_shift, order_queue, report_queue, logger_lock):
super(MappabilityCalcWorker, self).__init__()
self.calculator = MappableLengthCalculator(path, max_shift, logger_lock)
self.calculator._progress.disable_bar()
self.order_queue = order_queue
self.report_queue = report_queue
self.logger_lock = logger_lock
self.calculator._progress = ProgressHook(report_queue)
def run(self):
with self.logger_lock:
logger.debug("{}: Hello. My pid is {}.".format(self.name, os.getpid()))
while True:
chrom = self.order_queue.get()
if chrom is None:
break
with self.logger_lock:
logger.debug("{}: Process {}...".format(self.name, chrom))
self.calculator.calc_mappability(chrom)
self.report_queue.put((chrom, self.calculator.chrom2mappable_len[chrom]))
with self.logger_lock:
logger.debug("{}: Goodbye.".format(self.name))
self.calculator.close()
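# Hedged usage sketch (file name and parameter values are assumptions, not
# part of this module): a caller would typically point the handler at a
# BigWig mappability track, compute per-chromosome stats, then persist them:
#     handler = MappabilityHandler("mappability.bw", max_shift=300,
#                                  readlen=36, nworker=4)
#     handler.calc_mappability()
#     handler.save_mappability_stats()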
|
|
#!/usr/bin/env python
import dbapi20
import unittest
import MySQLdb
class test_MySQLdb(dbapi20.DatabaseAPI20Test):
driver = MySQLdb
connect_args = ()
connect_kw_args = dict(db='test',
read_default_file='~/.my.cnf',
charset='utf8',
sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL")
def test_setoutputsize(self): pass
def test_setoutputsize_basic(self): pass
def test_nextset(self): pass
"""The tests on fetchone and fetchall and rowcount bogusly
test for an exception if the statement cannot return a
result set. MySQL always returns a result set; it's just that
some things return empty result sets."""
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
## self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
self.executeDDL1(cur)
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.failUnless(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
## self.assertEqual(cur.fetchone(),None,
## 'cursor.fetchone should return None if no more rows available'
## )
self.failUnless(cur.rowcount in (-1,1))
finally:
con.close()
# Same complaint as for fetchall and fetchone
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount should be -1 after executing no-result '
## 'statements'
## )
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
## self.failUnless(cur.rowcount in (-1,1),
##                 'cursor.rowcount should == number of rows inserted, or '
## 'set to -1 after executing an insert statement'
## )
cur.execute("select name from %sbooze" % self.table_prefix)
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount not being reset to -1 after executing '
## 'no-result statements'
## )
finally:
con.close()
def test_callproc(self):
pass # performed in test_MySQL_capabilities
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
sql="""
create procedure deleteme()
begin
select count(*) from %(tp)sbooze;
select name from %(tp)sbooze;
end
""" % dict(tp=self.table_prefix)
cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
cur.execute("drop procedure deleteme")
def test_nextset(self):
from warnings import warn
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
if s:
empty = cur.fetchall()
self.assertEquals(len(empty), 0,
"non-empty result set after other result sets")
#warn("Incompatibility: MySQL returns an empty result set for the CALL itself",
# Warning)
#assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
if __name__ == '__main__':
unittest.main()
print '''"Huh-huh, he said 'unit'." -- Butthead'''
|
|
#!/usr/bin/env python
"""
Remote application that interacts with rigs using rigctl protocol.
Please refer to:
http://gqrx.dk/
http://gqrx.dk/doc/remote-control
http://sourceforge.net/apps/mediawiki/hamlib/index.php?title=Documentation
Author: Rafael Marmelo
Author: Simone Marzona
License: MIT License
Copyright (c) 2014 Rafael Marmelo
Copyright (c) 2015 Simone Marzona
"""
import logging
import telnetlib
import socket
from rig_remote.constants import (
# DEFAULT_CONFIG,
ALLOWED_VFO_COMMANDS,
ALLOWED_SPLIT_MODES,
ALLOWED_PARM_COMMANDS,
ALLOWED_FUNC_COMMANDS,
RESET_CMD_DICT,
ALLOWED_RIGCTL_MODES,
RIG_TIMEOUT,
)
# logging configuration
logger = logging.getLogger(__name__)
# classes definition
class RigCtl(object):
"""Basic rigctl client implementation."""
def __init__(self, target):
"""implements the rig.
:param target: rig uri data
:type target: dict created from build_rig_uri
:raises TypeError: if the target is not a dict of 3 keys
"""
if not isinstance(target, dict) or not len(target.keys()) == 3:
logger.error("target is not of type dict "
"but {}".format(type(target)))
raise TypeError
self.target = target
def _request(self, request, target=None):
"""Main method implementing the rigctl protocol. It's wrapped by the
more specific methods that offer the specific functions.
:param request: string to send through the telnet connection
:type request: string
:raises: none
:returns response: response data
:response type: string
"""
if not target:
target = self.target
try:
con = telnetlib.Telnet(target["hostname"],
target["port"],
RIG_TIMEOUT)
except socket.timeout:
logger.error("Time out while connecting to {}:{}".format(target["hostname"],
["port"]))
raise
except socket.error:
logger.exception("Connection refused on {}:{}".format(["hostname"],
["port"]))
raise
con.write(('%s\n' % request).encode('ascii'))
response = con.read_some().decode('ascii').strip()
con.write('c\n'.encode('ascii'))
return response
def set_frequency(self, frequency, target=None):
"""Wrapper around _request. It configures the command for setting
a frequency.
"""
try:
float(frequency)
except ValueError:
logger.error("Frequency value must be a float, "
"got {}".format(frequency))
raise
return self._request('F %s' % frequency, target)
def get_frequency(self, target=None):
"""Wrapper around _request. It configures the command for getting
a frequency.
"""
        output = self._request('f', target)
        if not isinstance(output, basestring):
            logger.error("Expected unicode string while getting radio "
                         "frequency, got {}".format(output))
            raise ValueError
        return output
def set_mode(self, mode, target=None):
"""Wrapper around _request. It configures the command for setting
the mode.
"""
if not isinstance(mode, str) or mode not in ALLOWED_RIGCTL_MODES:
logger.error("Frequency mode must be a string in {}, "\
"got {}".format(ALLOWED_RIGCTL_MODES, mode))
raise ValueError
return self._request('M %s' % mode, target)
def get_mode(self, target=None):
"""Wrapper around _request. It configures the command for getting
the mode.
"""
        # older versions of gqrx reply with only the mode (u'WFM_ST' as an example)
        # newer versions reply with something like u'WFM_ST\n160000'
        output = self._request('m')
        if "\n" in output:
            output = output.split("\n")[0]
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting radio mode, "
"got {}".format(output))
raise ValueError
return output
def start_recording(self):
"""Wrapper around _request. It configures the command for starting
the recording.
"""
return self._request('AOS')
def stop_recording(self):
"""Wrapper around _request. It configures the command for stopping
the recording.
"""
return self._request('LOS')
def get_level(self):
"""Wrapper around _request. It configures the command for getting
the signal level.
"""
output = self._request('l')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting radio "
"signal level, got {}".format(output))
raise ValueError
return output
def set_vfo(self, vfo):
"""Wrapper around _request. It configures the command for setting
VFO.
"""
if vfo not in ALLOWED_VFO_COMMANDS:
logger.error("VFO value must be a string inclueded in {}, "
"got {}".format(ALLOWED_VFO_COMMANDS, vfo))
raise ValueError
return self._request('V %s' % vfo)
def get_vfo(self):
"""Wrapper around _request. It configures the command for getting
VFO.
"""
output = self._request('v')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting VFO, "
"got {}".format(output))
raise ValueError
return output
def set_rit(self, rit):
"""Wrapper around _request. It configures the command for getting
RIT.
"""
if not isinstance(rit, int):
logger.error("RIT value must be an int, "
"got {}".format(type(rit)))
raise ValueError
return self._request('J %s' % rit)
def get_rit(self):
"""Wrapper around _request. It configures the command for getting
RIT.
"""
output = self._request('j')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting RIT, "
"got {}".format(type(output)))
raise ValueError
return output
def set_xit(self, xit):
"""Wrapper around _request. It configures the command for getting
XIT.
"""
if not isinstance(xit, basestring):
logger.error("XIT value must be a string, "
"got {}".format(type(xit)))
raise ValueError
return self._request('J %s' % xit)
def get_xit(self):
"""Wrapper around _request. It configures the command for getting
XIT.
"""
output = self._request('j')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting XIT, "
"got {}".format(type(output)))
raise ValueError
return output
def set_split_freq(self, split_freq):
"""Wrapper around _request. It configures the command for setting
split frequency.
"""
if not isinstance(split_freq, int):
logger.error("XIT value must be an integer, "
"got {}".format(type(split_freq)))
raise ValueError
return self._request('I %s' % split_freq)
def get_split_freq(self):
"""Wrapper around _request. It configures the command for getting
        the split frequency.
"""
output = self._request('i')
        if not isinstance(output, basestring):
            logger.error("Expected unicode string while getting split_frequency, "
                         "got {}".format(type(output)))
raise ValueError
return output
def set_split_mode(self, split_mode):
"""Wrapper around _request. It configures the command for setting
        the split mode.
"""
if split_mode not in ALLOWED_SPLIT_MODES:
logger.error("split_mode value must be a string in {}, "
"got {}".format(ALLOWED_SPLIT_MODES,
type(split_mode)))
raise ValueError
return self._request('X %s' % split_mode)
def get_split_mode(self):
"""Wrapper around _request. It configures the command for getting
the split mode.
"""
output = self._request('x')
        if not isinstance(output, basestring):
            logger.error("Expected unicode string while getting split_mode, "
                         "got {}".format(type(output)))
raise ValueError
return output
def set_func(self, func):
"""Wrapper around _request. It configures the command for getting
func.
"""
if func not in ALLOWED_FUNC_COMMANDS:
logger.error("func value must be a string inclueded in {}, "
"got {}".format(ALLOWED_FUNC_COMMANDS, func))
raise ValueError
return self._request('U %s' % func)
def get_func(self):
"""Wrapper around _request. It configures the command for getting
func.
"""
output = self._request('u')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting func, "
"got {}".format(output))
raise ValueError
return output
def set_parm(self, parm):
"""Wrapper around _request. It configures the command for getting
parm.
"""
if parm not in ALLOWED_PARM_COMMANDS:
logger.error("parm value must be a string inclueded in {}, "
"got {}".format(ALLOWED_PARM_COMMANDS, parm))
raise ValueError
return self._request('P %s' % parm)
def get_parm(self):
"""Wrapper around _request. It configures the command for getting
parm.
"""
output = self._request('p')
if not isinstance(output, basestring):
logger.error("Expected unicode string while getting parm, "
"got {}".format(output))
raise ValueError
return output
def set_antenna(self, antenna):
"""Wrapper around _request. It configures the command for setting
an antenna.
"""
if not isinstance(antenna, int):
logger.error("antenna value must be an int, "
"got {}".format(antenna))
raise ValueError
return self._request('Y %s' % antenna)
def get_antenna(self):
"""Wrapper around _request. It configures the command for getting
the antenna in use.
"""
        output = self._request('y')
        if not isinstance(output, basestring):
            logger.error("Expected unicode string while getting radio antenna, "
                         "got {}".format(output))
            raise ValueError
        return output
def rig_reset(self, reset_signal):
"""Wrapper around _request. It configures the command for resetting
the rig with various levels 0 = None, 1 = Software reset,
2 = VFO reset, 4 = Memory Clear reset, 8 = Master reset.
"""
if reset_signal not in RESET_CMD_DICT.keys():
logger.error("Reset_signal must be one of "
"{}.".format(RESET_CMD_DICT.keys()))
raise ValueError
return self._request('* %s' % reset_signal)
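# Hedged usage sketch (hostname, port and the name of the third key are
# assumptions; the real target dict is built elsewhere, e.g. by
# build_rig_uri): RigCtl only needs a 3-key dict and then wraps one rigctl
# command per call:
#     target = {"hostname": "127.0.0.1", "port": 7356, "rig_number": 1}
#     rig = RigCtl(target)
#     rig.set_frequency(145500000.0)
#     print(rig.get_mode())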
|
|
"""
.. todo::
WRITEME
"""
import os
import logging
import numpy
from theano.compat.six.moves import xrange
from pylearn2.datasets import cache, dense_design_matrix
from pylearn2.expr.preprocessing import global_contrast_normalize
from pylearn2.utils import contains_nan
from pylearn2.utils import serial
from pylearn2.utils import string_utils
_logger = logging.getLogger(__name__)
class CIFAR10(dense_design_matrix.DenseDesignMatrix):
"""
.. todo::
WRITEME
Parameters
----------
which_set : str
One of 'train', 'test'
center : WRITEME
rescale : WRITEME
gcn : float, optional
Multiplicative constant to use for global contrast normalization.
No global contrast normalization is applied, if None
start : WRITEME
stop : WRITEME
axes : WRITEME
toronto_prepro : WRITEME
preprocessor : WRITEME
"""
def __init__(self, which_set, center=False, rescale=False, gcn=None,
start=None, stop=None, axes=('b', 0, 1, 'c'),
toronto_prepro = False, preprocessor = None):
# note: there is no such thing as the cifar10 validation set;
# pylearn1 defined one but really it should be user-configurable
# (as it is here)
self.axes = axes
# we define here:
dtype = 'uint8'
ntrain = 50000
nvalid = 0 # artefact, we won't use it
ntest = 10000
# we also expose the following details:
self.img_shape = (3, 32, 32)
self.img_size = numpy.prod(self.img_shape)
self.n_classes = 10
self.label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# prepare loading
fnames = ['data_batch_%i' % i for i in range(1, 6)]
datasets = {}
datapath = os.path.join(
string_utils.preprocess('${PYLEARN2_DATA_PATH}'),
'cifar10', 'cifar-10-batches-py')
for name in fnames + ['test_batch']:
fname = os.path.join(datapath, name)
if not os.path.exists(fname):
raise IOError(fname + " was not found. You probably need to "
"download the CIFAR-10 dataset by using the "
"download script in "
"pylearn2/scripts/datasets/download_cifar10.sh "
"or manually from "
"http://www.cs.utoronto.ca/~kriz/cifar.html")
datasets[name] = cache.datasetCache.cache_file(fname)
        lenx = int(numpy.ceil((ntrain + nvalid) / 10000.) * 10000)
x = numpy.zeros((lenx, self.img_size), dtype=dtype)
y = numpy.zeros((lenx, 1), dtype=dtype)
# load train data
nloaded = 0
for i, fname in enumerate(fnames):
_logger.info('loading file %s' % datasets[fname])
data = serial.load(datasets[fname])
x[i * 10000:(i + 1) * 10000, :] = data['data']
y[i * 10000:(i + 1) * 10000, 0] = data['labels']
nloaded += 10000
if nloaded >= ntrain + nvalid + ntest:
break
# load test data
_logger.info('loading file %s' % datasets['test_batch'])
data = serial.load(datasets['test_batch'])
# process this data
Xs = {'train': x[0:ntrain],
'test': data['data'][0:ntest]}
Ys = {'train': y[0:ntrain],
'test': data['labels'][0:ntest]}
X = numpy.cast['float32'](Xs[which_set])
y = Ys[which_set]
if isinstance(y, list):
y = numpy.asarray(y).astype(dtype)
if which_set == 'test':
assert y.shape[0] == 10000
y = y.reshape((y.shape[0], 1))
if center:
X -= 127.5
self.center = center
if rescale:
X /= 127.5
self.rescale = rescale
if toronto_prepro:
assert not center
assert not gcn
X = X / 255.
if which_set == 'test':
other = CIFAR10(which_set='train')
oX = other.X
oX /= 255.
X = X - oX.mean(axis=0)
else:
X = X - X.mean(axis=0)
self.toronto_prepro = toronto_prepro
self.gcn = gcn
if gcn is not None:
gcn = float(gcn)
X = global_contrast_normalize(X, scale=gcn)
if start is not None:
# This needs to come after the prepro so that it doesn't
# change the pixel means computed above for toronto_prepro
assert start >= 0
assert stop > start
assert stop <= X.shape[0]
X = X[start:stop, :]
y = y[start:stop, :]
assert X.shape[0] == y.shape[0]
if which_set == 'test':
assert X.shape[0] == 10000
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(CIFAR10, self).__init__(X=X, y=y, view_converter=view_converter,
y_labels=self.n_classes)
assert not contains_nan(self.X)
if preprocessor:
preprocessor.apply(self)
def adjust_for_viewer(self, X):
"""
.. todo::
WRITEME
"""
# assumes no preprocessing. need to make preprocessors mark the
# new ranges
rval = X.copy()
# patch old pkl files
if not hasattr(self, 'center'):
self.center = False
if not hasattr(self, 'rescale'):
self.rescale = False
if not hasattr(self, 'gcn'):
self.gcn = False
if self.gcn is not None:
rval = X.copy()
for i in xrange(rval.shape[0]):
rval[i, :] /= numpy.abs(rval[i, :]).max()
return rval
if not self.center:
rval -= 127.5
if not self.rescale:
rval /= 127.5
rval = numpy.clip(rval, -1., 1.)
return rval
def __setstate__(self, state):
super(CIFAR10, self).__setstate__(state)
# Patch old pkls
if self.y is not None and self.y.ndim == 1:
self.y = self.y.reshape((self.y.shape[0], 1))
if 'y_labels' not in state:
self.y_labels = 10
def adjust_to_be_viewed_with(self, X, orig, per_example=False):
"""
.. todo::
WRITEME
"""
        # if the scale is set based on the data, display X using the
        # scale determined by orig
# assumes no preprocessing. need to make preprocessors mark
# the new ranges
rval = X.copy()
# patch old pkl files
if not hasattr(self, 'center'):
self.center = False
if not hasattr(self, 'rescale'):
self.rescale = False
if not hasattr(self, 'gcn'):
self.gcn = False
if self.gcn is not None:
rval = X.copy()
if per_example:
for i in xrange(rval.shape[0]):
rval[i, :] /= numpy.abs(orig[i, :]).max()
else:
rval /= numpy.abs(orig).max()
rval = numpy.clip(rval, -1., 1.)
return rval
if not self.center:
rval -= 127.5
if not self.rescale:
rval /= 127.5
rval = numpy.clip(rval, -1., 1.)
return rval
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return CIFAR10(which_set='test', center=self.center,
rescale=self.rescale, gcn=self.gcn,
toronto_prepro=self.toronto_prepro,
axes=self.axes)
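# Hedged usage sketch (the gcn value and slice bounds are illustrative, not
# prescribed by this module; PYLEARN2_DATA_PATH must point at a downloaded
# copy of CIFAR-10):
#     train = CIFAR10(which_set='train', gcn=55., start=0, stop=40000)
#     test = train.get_test_set()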
|
|
import csv
import xml.etree.cElementTree as ET
import datetime
class ExportService():
'''
Create with the Service Manager!!!
'''
def __init__(self, series_service):
self._series_service = series_service
self.dt_format_str = "%m/%d/%Y %I:%M:%S %p"
def export_series_data(self, series_id, filename, utc=False, site=False, var=False, offset=False, qual=False,
src=False, qcl=False):
series = self._series_service.get_series_by_id(series_id)
if series is None:
return False
writer = csv.writer(open(filename, 'wb'))
print "log_file: ", filename
self.write_data_header(writer, utc, site, var, offset, qual, src, qcl)
for dv in series.data_values:
self.write_data_row(writer, series, dv, utc, site, var, offset, qual, src, qcl)
def write_data_row(self, writer, series, dv, utc, site, var, offset, qual, src, qcl):
data = [series.id, dv.id, dv.data_value, dv.value_accuracy, dv.local_date_time]
if utc:
data.append(dv.utc_offset)
data.append(dv.date_time_utc)
data.append(series.site_code)
if site:
data.append(series.site_name)
data.append(series.site.type)
data.append(series.site.latitude)
data.append(series.site.longitude)
data.append(series.site.spatial_ref.srs_name)
data.append(series.variable_code)
if var:
data.append(series.variable_name)
data.append(series.speciation)
data.append(series.variable_units_name)
data.append(series.variable.variable_unit.abbreviation)
data.append(series.sample_medium)
data.append(dv.offset_value)
data.append(dv.offset_type_id)
if offset:
if dv.offset_type is not None:
data.append(dv.offset_type.description)
data.append(dv.offset_type.unit.name)
else:
data.append('')
data.append('')
data.append(dv.censor_code)
data.append(dv.qualifier_id)
if qual:
if dv.qualifier is not None:
data.append(dv.qualifier.code)
data.append(dv.qualifier.description)
else:
data.append('')
data.append('')
if src:
data.append(series.organization)
data.append(series.source_description)
data.append(series.citation)
if qcl:
data.append(series.quality_control_level_code)
data.append(series.quality_control_level.definition)
data.append(series.quality_control_level.explanation)
data.append(dv.sample_id)
writer.writerow(data)
def write_data_header(self, writer, utc, site, var, offset, qual, src, qcl):
# Build header list
header = ["SeriesId", "ValueId", "DataValue", "ValueAccuracy", "LocalDateTime"]
if utc:
header.append("UTCOffset")
header.append("DateTimeUTC")
header.append("SiteCode")
if site:
header.append("SiteName")
header.append("SiteType")
header.append("Latitude")
header.append("Longitude")
header.append("SRSName")
header.append("VariableCode")
if var:
header.append("VariableName")
header.append("Speciation")
header.append("VariableUnitsName")
header.append("VariableUnitsAbbreviation")
header.append("SampleMedium")
header.append("OffsetValue")
header.append("OffsetTypeID")
if offset:
header.append("OffsetDescription")
header.append("OffsetUnitsName")
header.append("CensorCode")
header.append("QualifierID")
if qual:
header.append("QualifierCode")
header.append("QualifierDescription")
if src:
header.append("Organization")
header.append("SourceDescription")
header.append("Citation")
if qcl:
header.append("QualityControlLevelCode")
header.append("Definition")
header.append("Explanation")
header.append("SampleID")
writer.writerow(header)
def export_series_metadata(self, series_ids, filename):
if len(series_ids) == 0:
return
root = ET.Element("Metadata")
list_root = ET.SubElement(root, "DataSeriesList")
list_root.set("Total", str(len(series_ids)))
try:
with open(filename):
file_exists = True
except IOError:
file_exists = False
if file_exists:
# Read the file into the XML tree
pass
for series_id in series_ids:
series = self._series_service.get_series_by_id(series_id)
self.append_series_node(series, list_root)
tree = ET.ElementTree(root)
tree.write(filename)
def append_series_node(self, series, parent):
series_node = ET.SubElement(parent, "DataSeries")
series_node.set("ID", str(series.id))
self.append_general_info(series, series_node)
self.append_site_info(series, series_node)
self.append_var_info(series, series_node)
self.append_method_source_info(series, series_node)
self.append_misc_info(series, series_node)
return series_node
def append_general_info(self, series, parent):
meta = series.source.iso_metadata
general_node = ET.SubElement(parent, "GeneralInformation")
topic = ET.SubElement(general_node, "TopicCategory")
topic.text = meta.topic_category
title = ET.SubElement(general_node, "Title")
title.text = meta.title
abstract = ET.SubElement(general_node, "Abstract")
abstract.text = meta.abstract
prof_version = ET.SubElement(general_node, "ProfileVersion")
prof_version.text = meta.profile_version
metadata_link = ET.SubElement(general_node, "MetadataLink")
metadata_link.text = meta.metadata_link
date = ET.SubElement(general_node, "MetadataCreationDate")
# 7/1/2013 12:17:16 PM
date.text = datetime.datetime.now().strftime(self.dt_format_str)
def append_site_info(self, series, parent):
site = series.site
site_node = ET.SubElement(parent, "SiteInformation")
site_code = ET.SubElement(site_node, "SiteCode")
site_code.text = site.code
site_name = ET.SubElement(site_node, "SiteName")
site_name.text = site.name
site_type = ET.SubElement(site_node, "SiteType")
site_type.text = site.type
geo_coords = ET.SubElement(site_node, "GeographicCoordinates")
latitude = ET.SubElement(geo_coords, "Latitude")
latitude.text = str(site.latitude)
longitude = ET.SubElement(geo_coords, "Longitude")
longitude.text = str(site.longitude)
srs_id = ET.SubElement(geo_coords, "SRSID")
srs_id.text = str(site.spatial_ref.srs_id)
srs_name = ET.SubElement(geo_coords, "SRSName")
srs_name.text = site.spatial_ref.srs_name
is_geo = ET.SubElement(geo_coords, "IsGeographic")
is_geo.text = str(site.spatial_ref.is_geographic)
notes = ET.SubElement(geo_coords, "Notes")
notes.text = site.spatial_ref.notes
local_coords = ET.SubElement(site_node, "LocalCoordinates")
local_x = ET.SubElement(local_coords, "LocalX")
local_x.text = str(site.local_x)
local_y = ET.SubElement(local_coords, "LocalY")
local_y.text = str(site.local_y)
local_srs_id = ET.SubElement(local_coords, "SRSID")
local_srs_id.text = str(site.local_spatial_ref.srs_id)
local_srs_name = ET.SubElement(local_coords, "SRSName")
local_srs_name.text = site.local_spatial_ref.srs_name
local_is_geo = ET.SubElement(local_coords, "IsGeographic")
local_is_geo.text = str(site.local_spatial_ref.is_geographic)
local_notes = ET.SubElement(local_coords, "Notes")
local_notes.text = site.local_spatial_ref.notes
elevation = ET.SubElement(local_coords, "Elevation_m")
if site.elevation_m: elevation.text = str(site.elevation_m)
vert_datum = ET.SubElement(local_coords, "VerticalDatum")
if site.vertical_datum_id: vert_datum.text = str(site.vertical_datum_id)
pos_accuracy = ET.SubElement(site_node, "PosAccuracy_m")
pos_accuracy.text = str(site.pos_accuracy_m)
state = ET.SubElement(site_node, "State")
state.text = site.state
county = ET.SubElement(site_node, "County")
county.text = site.county
comments = ET.SubElement(site_node, "Comments")
comments.text = site.comments
def append_var_info(self, series, parent):
variable = series.variable
var_node = ET.SubElement(parent, "VariableInformation")
var_code = ET.SubElement(var_node, "VariableCode")
var_code.text = variable.code
var_name = ET.SubElement(var_node, "VariableName")
var_name.text = variable.name
speciation = ET.SubElement(var_node, "Speciation")
speciation.text = variable.speciation
var_units = ET.SubElement(var_node, "VariableUnits")
units_name = ET.SubElement(var_units, "UnitsName")
units_name.text = variable.variable_unit.name
units_type = ET.SubElement(var_units, "UnitsType")
units_type.text = variable.variable_unit.type
units_abbrev = ET.SubElement(var_units, "UnitsAbbreviation")
units_abbrev.text = variable.variable_unit.abbreviation
sample_medium = ET.SubElement(var_node, "SampleMedium")
sample_medium.text = variable.sample_medium
val_type = ET.SubElement(var_node, "ValueType")
val_type.text = variable.value_type
is_reg = ET.SubElement(var_node, "IsRegular")
is_reg.text = str(variable.is_regular)
time_support = ET.SubElement(var_node, "TimeSupport")
time_support.text = str(variable.time_support)
time_support_units = ET.SubElement(var_node, "TimeSupportUnits")
ts_units_name = ET.SubElement(time_support_units, "UnitsName")
ts_units_name.text = variable.time_unit.name
ts_units_type = ET.SubElement(time_support_units, "UnitsType")
ts_units_type.text = variable.time_unit.type
ts_units_abbrev = ET.SubElement(time_support_units, "UnitsAbbreviation")
ts_units_abbrev.text = variable.time_unit.abbreviation
data_type = ET.SubElement(var_node, "DataType")
data_type.text = variable.data_type
gen_cat = ET.SubElement(var_node, "GeneralCategory")
gen_cat.text = variable.general_category
no_dv = ET.SubElement(var_node, "NoDataValue")
no_dv.text = str(variable.no_data_value)
period = ET.SubElement(var_node, "PeriodOfRecord")
begin_dt = ET.SubElement(period, "BeginDateTime")
begin_dt.text = series.begin_date_time.strftime(self.dt_format_str)
end_dt = ET.SubElement(period, "EndDateTime")
end_dt.text = series.end_date_time.strftime(self.dt_format_str)
begin_dt_utc = ET.SubElement(period, "BeginDateTimeUTC")
begin_dt_utc.text = series.begin_date_time_utc.strftime(self.dt_format_str)
end_dt_utc = ET.SubElement(period, "EndDateTimeUTC")
end_dt_utc.text = series.end_date_time_utc.strftime(self.dt_format_str)
value_count = ET.SubElement(period, "ValueCount")
value_count.text = str(series.value_count)
def append_method_source_info(self, series, parent):
method = series.method
method_node = ET.SubElement(parent, "MethodInformation")
method_desc = ET.SubElement(method_node, "MethodDescription")
method_desc.text = method.description
method_link = ET.SubElement(method_node, "MethodLink")
method_link.text = method.link
source = series.source
source_node = ET.SubElement(parent, "SourceInformation")
org = ET.SubElement(source_node, "Organization")
org.text = source.organization
source_desc = ET.SubElement(source_node, "SourceDescription")
source_desc.text = source.description
source_link = ET.SubElement(source_node, "SourceLink")
source_link.text = source.link
contact = ET.SubElement(source_node, "Contact")
contact_name = ET.SubElement(contact, "ContactName")
contact_name.text = source.contact_name
phone = ET.SubElement(contact, "Phone")
phone.text = source.phone
email = ET.SubElement(contact, "Email")
email.text = source.email
address = ET.SubElement(contact, "Address")
address.text = source.address
city = ET.SubElement(contact, "City")
city.text = source.city
state = ET.SubElement(contact, "State")
state.text = source.state
zip_code = ET.SubElement(contact, "ZipCode")
zip_code.text = source.zip_code
citation = ET.SubElement(source_node, "Citation")
citation.text = source.citation
def append_misc_info(self, series, parent):
qcl = series.quality_control_level
qcl_node = ET.SubElement(parent, "QualityControlLevelInformation")
qcl_code = ET.SubElement(qcl_node, "QualityControlLevelCode")
qcl_code.text = qcl.code
qcl_def = ET.SubElement(qcl_node, "Definition")
qcl_def.text = qcl.definition
qcl_expl = ET.SubElement(qcl_node, "Explanation")
qcl_expl.text = qcl.explanation
offsets_node = ET.SubElement(parent, "OffsetInformation")
offsets = self._series_service.get_offset_types_by_series_id(series.id)
for offset in offsets:
offset_id = ET.SubElement(offsets_node, "Offset")
if offset:
offset_id.set("ID", str(offset.id))
else:
offset_id.set("ID", "")
offset_desc = ET.SubElement(offsets_node, "OffsetDescription")
if offset: offset_desc.text = offset.description
offset_units = ET.SubElement(offsets_node, "OffsetUnits")
units_name = ET.SubElement(offset_units, "UnitsName")
if offset: units_name.text = offset.unit.name
units_type = ET.SubElement(offset_units, "UnitsType")
if offset: units_type.text = offset.unit.type
units_abbrev = ET.SubElement(offset_units, "UnitsAbbreviation")
if offset: units_abbrev.text = offset.unit.abbreviation
qualifiers_node = ET.SubElement(parent, "QualifierInformation")
qualifiers = self._series_service.get_qualifiers_by_series_id(series.id)
for qual in qualifiers:
qual_id = ET.SubElement(qualifiers_node, "Qualifier")
if qual:
qual_id.set("ID", str(qual.id))
else:
qual_id.set("ID", "")
qual_code = ET.SubElement(qual_id, "QualiferCode")
if qual: qual_code.text = qual.code
qual_desc = ET.SubElement(qual_id, "QualifierDescription")
if qual: qual_desc.text = qual.description
samples_node = ET.SubElement(parent, "SampleInformation")
samples = self._series_service.get_samples_by_series_id(series.id)
for sample in samples:
sample_id = ET.SubElement(samples_node, "Sample")
if sample:
sample_id.set("ID", str(sample.id))
else:
sample_id.set("ID", "")
sample_type = ET.SubElement(sample_id, "SampleType")
if sample: sample_type.text = sample.type
lab_code = ET.SubElement(sample_id, "LabSampleCode")
if sample: lab_code.text = sample.lab_sample_code
lab_method_id = ET.SubElement(sample_id, "LabMethodID")
            if sample: lab_method_id.text = str(sample.lab_method_id)
lab_method_node = ET.SubElement(parent, "LabMethodInformation")
for sample in samples:
if sample: lab_method = sample.lab_method
lab_method_id = ET.SubElement(lab_method_node, "LabMethod")
if lab_method:
lab_method_id.set("ID", str(lab_method.id))
else:
lab_method_id.set("ID", "")
lab_name = ET.SubElement(lab_method_id, "LabName")
if lab_method: lab_name.text = lab_method.name
lab_org = ET.SubElement(lab_method_id, "LabOrganization")
if lab_method: lab_org.text = lab_method.organization
method_name = ET.SubElement(lab_method_id, "LabMethodName")
if lab_method: method_name.text = lab_method.method_name
method_desc = ET.SubElement(lab_method_id, "LabMethodDescription")
if lab_method: method_desc.text = lab_method.method_description
method_link = ET.SubElement(lab_method_id, "LabMethodLink")
if lab_method: method_link.text = lab_method.link
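# Hedged usage sketch (the series id and file names are illustrative; the
# series_service object comes from the application's Service Manager, as the
# class docstring notes):
#     export = ExportService(series_service)
#     export.export_series_data(42, "series_42.csv", utc=True, site=True)
#     export.export_series_metadata([42], "series_42_metadata.xml")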
|
|
# -*- coding: utf-8 -*-
#
# inventory/categories/api/tests/test_categories_api.py
#
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from inventory.categories.models import Category
from inventory.common.api.tests.base_test import BaseTest
from inventory.projects.models import Membership
UserModel = get_user_model()
class TestCategoryAPI(BaseTest, APITestCase):
DEFAULT_USER = UserModel.ROLE_MAP[UserModel.DEFAULT_USER]
PROJECT_USER = Membership.ROLE_MAP[Membership.PROJECT_USER]
def __init__(self, name):
super().__init__(name)
def setUp(self):
super().setUp()
# Create an InventoryType and Project.
self.in_type = self._create_inventory_type()
members = [
{'user': self.user, 'role_text': self.PROJECT_USER}
]
self.project = self._create_project(self.in_type, members=members)
kwargs = {'public_id': self.project.public_id}
self.project_uri = reverse('project-detail', kwargs=kwargs)
def get_category_field(self, uri, field):
"""
Get a category and return the value of the provided field.
"""
response = self.client.get(uri, format='json')
return response.data.get(field)
def test_GET_category_list_with_invalid_permissions(self):
"""
Test the category_list endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_category_list_with_valid_permissions(self):
"""
Test the category_list endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-list')
self._test_users_with_valid_permissions(
uri, method, default_user=False)
self._test_project_users_with_valid_permissions(uri, method)
def test_POST_category_list_with_invalid_permissions(self):
"""
Test that a POST to category_list fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('category-list')
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_POST_category_list_with_valid_permissions(self):
"""
Test that a POST to category_list passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
uri = reverse('category-list')
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', su.copy())
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', su.copy())
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', su.copy())
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', su.copy())
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_OPTIONS_category_list_with_invalid_permissions(self):
"""
        Test that the method OPTIONS fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'options'
uri = reverse('category-list')
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_category_list_with_valid_permissions(self):
"""
Test that the method OPTIONS brings back the correct data.
"""
method = 'options'
uri = reverse('category-list')
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_GET_category_detail_with_invalid_permissions(self):
"""
Test that a GET on the category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'get'
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_GET_category_detail_with_valid_permissions(self):
"""
Test that a GET to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'get'
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_PUT_category_detail_with_invalid_permissions(self):
"""
Test that a PUT to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'put'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_PUT_category_detail_with_valid_permissions(self):
"""
Test that a PUT to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'put'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', su.copy())
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', su.copy())
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', su.copy())
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', su.copy())
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', su.copy())
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_PATCH_category_detail_with_invalid_permissions(self):
"""
Test that a PATCH to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'patch'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_PATCH_category_detail_with_valid_permissions(self):
"""
Test that a PATCH to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
method = 'patch'
data = {}
su = data.setdefault('SU', {})
su['name'] = 'TestCategory-01'
su['project'] = self.project_uri
ad = data.setdefault('AD', {})
ad['name'] = 'TestCategory-02'
du = data.setdefault('DU', {})
du['name'] = 'TestCategory-03'
self._test_users_with_valid_permissions(
uri, method, request_data=data)
pow = data.setdefault('POW', {})
pow['name'] = 'TestCategory-04'
pma = data.setdefault('PMA', {})
pma['name'] = 'TestCategory-05'
pdu = data.setdefault('PDU', {})
pdu['name'] = 'TestCategory-06'
self._test_project_users_with_valid_permissions(
uri, method, project_user=False, request_data=data)
def test_DELETE_category_detail_with_invalid_permissions(self):
"""
Test that a DELETE to category_detail fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_DELETE_category_detail_with_valid_permissions(self):
"""
        Test that a DELETE to category_detail passes with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
# Test SUPERUSER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_superuser_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test ADMINISTRATOR
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_administrator_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test DEFAULT_USER
## This is an invalid test since the DEFAULT_USER has no access.
# Test PROJECT_OWNER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_project_owner_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test PROJECT_MANAGER
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_project_manager_with_valid_permissions(uri, method)
self._test_valid_GET_with_errors(uri)
# Test PROJECT_USER
## This is an invalid test since the PROJECT_USER has no access.
def test_OPTIONS_category_detail_with_invalid_permissions(self):
"""
        Test that the method OPTIONS fails with invalid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'options'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_invalid_permissions(uri, method)
self._test_project_users_with_invalid_permissions(uri, method)
def test_OPTIONS_category_detail_with_valid_permissions(self):
"""
Test that the method OPTIONS brings back the correct data.
"""
method = 'options'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
self._test_users_with_valid_permissions(uri, method)
self._test_project_users_with_valid_permissions(uri, method)
def test_create_category_twice_to_same_parent(self):
"""
Test that a category is not created twice with the same composite key.
"""
#self.skipTest("Temporarily skipped")
# Create Category one.
uri = reverse('category-list')
new_data = {'name': 'TestCategory-1',
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg)
# Create Category two.
parent_uri = response.data.get('href')
uri = reverse('category-list')
new_data = {'name': 'TestCategory-2',
'parent': parent_uri,
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, msg)
# Create Category two again--should fail.
uri = reverse('category-list')
new_data = {'name': 'TestCategory-2',
'parent': parent_uri,
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
def test_delimitor_in_category_name(self):
"""
        Test that the delimiter is not allowed in the category name.
"""
#self.skipTest("Temporarily skipped")
# Create Category one.
uri = reverse('category-list')
new_data = {'name': 'Test{}Category-1'.format(
Category.DEFAULT_SEPARATOR),
'project': self.project_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, 'name'), msg)
self._test_errors(response, tests={
'name': u"A category name cannot ",
})
def test_category_is_not_parent(self):
"""
Test that this category does not exist in the current tree.
"""
#self.skipTest("Temporarily skipped")
        # Create three categories.
name = "Test Category 1"
cat0 = self._create_category(self.project, name=name)
name = "Test Category 2"
cat1 = self._create_category(self.project, name=name, parent=cat0)
name = "Test Category 3"
cat2 = self._create_category(self.project, name=name, parent=cat1)
# Try adding 'Test Category 2' to the tree using the API.
uri = reverse('category-list')
cat2_uri = reverse('category-detail',
kwargs={'public_id': cat2.public_id})
new_data = {'name': "Test Category 2",
'project': self.project_uri,
'parent': cat2_uri}
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, 'name'), msg)
self._test_errors(response, tests={
'name': u"A category in this tree ",
})
def test_root_level_category_exists(self):
"""
Test that there are no root level categories with this name that
already exist for this owner.
"""
#self.skipTest("Temporarily skipped")
        # Create a category.
name = "Duplicate Name"
cat = self._create_category(self.project, name=name)
# Create a category through the API.
new_data = {'name': name,
'project': self.project_uri}
uri = reverse('category-list')
response = self.client.post(uri, new_data, format='json')
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, 'name'), msg)
self._test_errors(response, tests={
'name': u"A root level category name ",
})
def test_wrong_user_gets_no_results(self):
"""
        Test that the requesting user gets no results for categories that are
        in a different project. This test verifies that a user of one
        project does not have access to another project's objects.
"""
#self.skipTest("Temporarily skipped")
# Create new user
user, client = self._create_user(
username="SecondUser", password="0987654321")
# Create a new project
p_name = "Test Project_1"
project = self._create_project(self.in_type, name=p_name)
# Create a category
c_name = "Test Category 1"
category = self._create_category(project, name=c_name)
# GET category on the API
uri = reverse('category-list')
response = client.get(uri, format='json', **self.HEADERS)
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_403_FORBIDDEN, response.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
self.assertTrue(self._has_error(response), msg)
self._test_errors(response, tests={
'detail': "You do not have permission to perform this action.",
})
# Test GET on a category detail
uri = reverse('category-detail',
kwargs={'public_id': category.public_id})
response = client.get(uri, format='json', **self.HEADERS)
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_403_FORBIDDEN, response.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
self.assertTrue(self._has_error(response), msg)
self._test_errors(response, tests={
'detail': "You do not have permission to perform this action.",
})
# Test PUT to a specific category
data = {'name': 'Changed Category'}
response = client.patch(uri, data=data, format='json', **self.HEADERS)
msg = ("Response: {} should be {}, content: {}, user: {}, "
"project creator: {}").format(
response.status_code, status.HTTP_403_FORBIDDEN, response.data,
user, project.creator)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, msg)
self.assertTrue(self._has_error(response), msg)
self._test_errors(response, tests={
'detail': "You do not have permission to perform this action.",
})
class TestCategoryCloneAPI(BaseTest):
def __init__(self, name):
super().__init__(name)
def setUp(self):
super().setUp()
# Create an InventoryType and Project.
self.in_type = self._create_inventory_type()
self.project = self._create_project(self.in_type, members=[self.user])
kwargs = {'public_id': self.project.public_id}
self.project_uri = reverse('project-detail', kwargs=kwargs)
def flatten(self, items):
"""
Given a list, possibly nested to any level, return it flattened.
http://code.activestate.com/recipes/578948-flattening-an-arbitrarily-nested-list-in-python/
"""
flattened = []
for item in items:
if isinstance(item, list):
flattened.extend(self.flatten(item))
else:
flattened.append(item)
return flattened
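    # Illustrative example (not part of the original suite):
    #   self.flatten([1, [2, [3, 4]], 5])  ->  [1, 2, 3, 4, 5]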
def test_GET_category_clone_with_invalid_permissions(self):
"""
Test the category_clone endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
category = self._create_category(self.project, "Test Root Category")
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = [category.public_id]
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
def test_GET_category_clone_with_valid_permissions(self):
"""
Test the category_clone endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
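        # Editorial note (assumption): the nested create_list above builds five
        # categories -- the root 'TestLevel-0' plus two branches of two levels
        # each -- which is consistent with the counts of 5 asserted elsewhere
        # in this class.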
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = [categories[0][0].public_id] # 'TestLevel-0'
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_valid_permissions(
uri, method, request_data=data, default_user=False)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_valid_permissions(
uri, method, request_data=data)
def test_GET_category_clone_with_parameters(self):
"""
        Test the category_clone endpoint with various parameters.
"""
#self.skipTest("Temporarily skipped")
method = 'get'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
uri = reverse('category-clone')
data = {}
data['categories'] = [categories[0][0].public_id] # 'TestLevel-0'
data['project'] = self.project.public_id
# Test with default arguments.
response = self.client.get(uri, data=data, format='json',
**self.HEADERS)
res_data = self.flatten(response.data)
msg = ("data: {}, found '{}' records , should be 5 records"
).format(res_data, len(res_data))
self.assertEqual(len(res_data), 5, msg)
# Test with with_root=False
data['with_root'] = False
response = self.client.get(uri, data=data, format='json',
**self.HEADERS)
res_data = self.flatten(response.data)
msg = ("data: {}, found '{}' records , should be 4 records"
).format(res_data, len(res_data))
self.assertEqual(len(res_data), 4, msg)
def test_POST_category_clone_with_invalid_permissions(self):
"""
Test the category_clone endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = create_list
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
categories = Category.objects.all()
msg = "categories: {}, count: {}, should be 0".format(
categories, categories.count())
self.assertEqual(categories.count(), 0, msg)
def test_POST_category_clone_with_valid_permissions(self):
"""
Test the category_clone endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'post'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = create_list
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_valid_permissions(
uri, method, request_data=data, default_user=False)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_valid_permissions(
uri, method, request_data=data, project_user=False)
categories = Category.objects.all()
msg = "categories: {}, count: {}, should be 5".format(
categories, categories.count())
self.assertEqual(categories.count(), 5, msg)
def test_DELETE_category_clone_with_invalid_permissions(self):
"""
Test the category_clone endpoint with no permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = [categories[0][0].public_id]
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_invalid_permissions(
uri, method, request_data=data)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_invalid_permissions(
uri, method, request_data=data)
categories = Category.objects.all()
msg = "categories: {}, count: {}, should be 5".format(
categories, categories.count())
self.assertEqual(categories.count(), 5, msg)
def test_DELETE_category_clone_with_valid_permissions(self):
"""
Test the category_clone endpoint with valid permissions.
"""
#self.skipTest("Temporarily skipped")
method = 'delete'
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
uri = reverse('category-clone')
data = {}
su = data.setdefault('SU', {})
su['categories'] = [categories[0][0].public_id]
su['project'] = self.project.public_id
data.setdefault('AD', su.copy())
data.setdefault('DU', su.copy())
self._test_users_with_valid_permissions(
uri, method, request_data=data, default_user=False)
data.setdefault('POW', su.copy())
data.setdefault('PMA', su.copy())
data.setdefault('PDU', su.copy())
self._test_project_users_with_valid_permissions(
uri, method, request_data=data, project_user=False)
categories = Category.objects.all()
msg = "categories: {}, count: {}, should be 0".format(
categories, categories.count())
self.assertEqual(categories.count(), 0, msg)
def test_category_clone_serializer_validation_errors_on_project(self):
"""
Test that invalid data causes validation errors.
"""
#self.skipTest("Temporarily skipped")
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
uri = reverse('category-clone')
data = {}
data['categories'] = [categories[0][0].public_id]
data['project'] = 'junk'
kwargs = self._setup_user_credentials()
kwargs['login'] = True
kwargs['is_superuser'] = False
kwargs['role'] = self.DEFAULT_USER
user, client = self._create_user(**kwargs)
self.project.process_members([self.user, user])
self.project.set_role(user, self.PROJECT_USER)
response = client.get(uri, data=data, format='json', **self.HEADERS)
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, msg)
self.assertTrue(self._has_error(response, error_key='project'), msg)
self._test_errors(response, tests={
'project': "A project with the public_id 'junk' does not exist.",
})
def test_category_clone_serializer_validation_errors_on_categories(self):
"""
Test that invalid data causes validation errors.
"""
#self.skipTest("Temporarily skipped")
create_list = [['TestLevel-0', (('TestLevel-1', 'TestLevel-2',),
('TestLevel-1a', 'TestLevel-2a',))]]
categories = Category.objects.create_category_tree(
self.project, create_list)
uri = reverse('category-clone')
data = {}
data['categories'] = []
data['project'] = self.project.public_id
kwargs = self._setup_user_credentials()
kwargs['login'] = True
kwargs['is_superuser'] = False
kwargs['role'] = self.DEFAULT_USER
user, client = self._create_user(**kwargs)
self.project.process_members([self.user, user])
self.project.set_role(user, self.PROJECT_USER)
status_code = status.HTTP_400_BAD_REQUEST
response = client.get(uri, data=data, **self.HEADERS)
msg = "Response: {} should be {}, content: {}".format(
response.status_code, status_code, response.data)
self.assertEqual(response.status_code, status_code, msg)
self.assertTrue(self._has_error(response, error_key='categories'), msg)
self._test_errors(response, tests={
'categories': "This field is required.",
})
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_annotation231
except ImportError:
btp_annotation231 = sys.modules["onshape_client.oas.models.btp_annotation231"]
try:
from onshape_client.oas.models import btp_argument_declaration232
except ImportError:
btp_argument_declaration232 = sys.modules[
"onshape_client.oas.models.btp_argument_declaration232"
]
try:
from onshape_client.oas.models import btp_builtin_identifier233
except ImportError:
btp_builtin_identifier233 = sys.modules[
"onshape_client.oas.models.btp_builtin_identifier233"
]
try:
from onshape_client.oas.models import btp_literal_map_entry257
except ImportError:
btp_literal_map_entry257 = sys.modules[
"onshape_client.oas.models.btp_literal_map_entry257"
]
try:
from onshape_client.oas.models import btp_module234
except ImportError:
btp_module234 = sys.modules["onshape_client.oas.models.btp_module234"]
try:
from onshape_client.oas.models import btp_module_id235
except ImportError:
btp_module_id235 = sys.modules["onshape_client.oas.models.btp_module_id235"]
try:
from onshape_client.oas.models import btp_name261
except ImportError:
btp_name261 = sys.modules["onshape_client.oas.models.btp_name261"]
try:
from onshape_client.oas.models import btp_property_accessor23
except ImportError:
btp_property_accessor23 = sys.modules[
"onshape_client.oas.models.btp_property_accessor23"
]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
try:
from onshape_client.oas.models import btp_statement269
except ImportError:
btp_statement269 = sys.modules["onshape_client.oas.models.btp_statement269"]
try:
from onshape_client.oas.models import btp_top_level_node286
except ImportError:
btp_top_level_node286 = sys.modules[
"onshape_client.oas.models.btp_top_level_node286"
]
try:
from onshape_client.oas.models import btp_type_name290
except ImportError:
btp_type_name290 = sys.modules["onshape_client.oas.models.btp_type_name290"]
try:
from onshape_client.oas.models import btpl_value249
except ImportError:
btpl_value249 = sys.modules["onshape_client.oas.models.btpl_value249"]
class BTPNode7(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"atomic": (bool,), # noqa: E501
"bt_type": (str,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return {
"bt_type": {
"BTPTopLevelNode-286": btp_top_level_node286.BTPTopLevelNode286,
"BTPStatement-269": btp_statement269.BTPStatement269,
"BTPPropertyAccessor-23": btp_property_accessor23.BTPPropertyAccessor23,
"BTPTypeName-290": btp_type_name290.BTPTypeName290,
"BTPModule-234": btp_module234.BTPModule234,
"BTPLValue-249": btpl_value249.BTPLValue249,
"BTPBuiltinIdentifier-233": btp_builtin_identifier233.BTPBuiltinIdentifier233,
"BTPName-261": btp_name261.BTPName261,
"BTPLiteralMapEntry-257": btp_literal_map_entry257.BTPLiteralMapEntry257,
"BTPArgumentDeclaration-232": btp_argument_declaration232.BTPArgumentDeclaration232,
"BTPModuleId-235": btp_module_id235.BTPModuleId235,
},
}
attribute_map = {
"atomic": "atomic", # noqa: E501
"bt_type": "btType", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_node7.BTPNode7 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
atomic (bool): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
@classmethod
def get_discriminator_class(cls, from_server, data):
"""Returns the child class specified by the discriminator"""
discriminator = cls.discriminator()
discr_propertyname_py = list(discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if from_server:
class_name = data[discr_propertyname_js]
else:
class_name = data[discr_propertyname_py]
class_name_to_discr_class = discriminator[discr_propertyname_py]
return class_name_to_discr_class.get(class_name)
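# Editorial note (not part of the generated module): given serialized data such
# as {"btType": "BTPStatement-269", ...}, get_discriminator_class() looks up the
# "btType"/"bt_type" value in discriminator() and returns the matching child
# model class (here btp_statement269.BTPStatement269) for deserialization.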
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import subprocess
import shutil
import unittest
import functools
import imath
import IECore
import Gaffer
import GafferTest
import GafferOSL
import GafferOSLTest
class OSLCodeTest( GafferOSLTest.OSLTestCase ) :
def testPlugTypes( self ) :
oslCode = GafferOSL.OSLCode()
code = ""
for i, plugType in enumerate( [
Gaffer.IntPlug,
Gaffer.FloatPlug,
functools.partial( Gaffer.V3fPlug, interpretation = IECore.GeometricData.Interpretation.Vector ),
Gaffer.Color3fPlug,
Gaffer.M44fPlug,
Gaffer.StringPlug,
GafferOSL.ClosurePlug,
] ) :
inName = "in%d" % i
outName = "out%d" % i
oslCode["parameters"][inName] = plugType( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"][outName] = plugType( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
code += "%s = %s;\n" % ( outName, inName )
oslCode["code"].setValue( code )
# The OSLCode node will have generated a shader from
# the code and parameters we gave it. Load this onto
# a regular OSLShader node to check it.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
self.assertEqual( oslShader["parameters"].keys(), oslCode["parameters"].keys() )
self.assertEqual( oslShader["out"].keys(), oslCode["out"].keys() )
for p in oslShader["parameters"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["parameters"][p.getName()] ) )
for p in oslShader["out"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["out"][p.getName()] ) )
def testParseError( self ) :
n = GafferOSL.OSLCode()
self.__assertError( n, n["code"].setValue, "oops" )
def testParseErrorDoesntDestroyExistingPlugs( self ) :
n = GafferOSL.OSLCode()
n["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
originalPlugs = n["parameters"].children() + n["out"].children()
self.__assertError( n, n["code"].setValue, "oops" )
self.assertEqual( n["parameters"].children() + n["out"].children(), originalPlugs )
def testChildAddedSignalNotSuppressedByError( self ) :
n = GafferOSL.OSLCode()
self.__assertError( n, n["code"].setValue, "oops" )
cs = GafferTest.CapturingSlot( n["parameters"].childAddedSignal() )
n["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( len( cs ), 1 )
def testEmpty( self ) :
# We want empty shaders to still output a
# shader so that the ShaderView picks it
# up, ready to update when an output is
# added.
n = GafferOSL.OSLCode()
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
n["code"].setValue( "//" )
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
n["code"].setValue( "" )
self.assertTrue( self.__osoFileName( n ) )
self.assertEqual( n["type"].getValue(), "osl:shader" )
def testMissingSemiColon( self ) :
n1 = GafferOSL.OSLCode()
n1["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n1["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n2 = GafferOSL.OSLCode()
n2["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n2["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
# The OSLCode node will often be used to throw in a one-liner,
# and omitting a semicolon is an easy mistake that we should
# correct automatically.
n1["code"].setValue( "out = in * 2" )
n2["code"].setValue( "out = in * 2;" )
self.assertEqual( self.__osoFileName( n1 ), self.__osoFileName( n2 ) )
def testAddingAndRemovingPlugsUpdatesShader( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
self.assertTrue( "in" in oslShader["parameters"] )
self.assertTrue( "out" in oslShader["out"] )
def testObjectProcessingFunctions( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["out"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertNoError( oslCode, oslCode["code"].setValue, 'out = inFloat( "s", 0 );' )
def testImageProcessingFunctions( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["out"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertNoError( oslCode, oslCode["code"].setValue, 'out = inChannel( "R", 0 );' )
def testColorSpline( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["sp"] = Gaffer.SplinefColor3fPlug(
defaultValue = IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
)
),
flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = colorSpline( spPositions, spValues, spBasis, u );" )
# Load the generated shader onto an OSLShader
# node to verify it.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( self.__osoFileName( oslCode ) )
oslShader["parameters"]["sp"].setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( oslShader["parameters"]["sp"] ), repr( oslCode["parameters"]["sp"] ) )
def testShaderNameMatchesFileName( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = color( 0, 1, 0 );" )
info = subprocess.check_output( [ "oslinfo", self.__osoFileName( oslCode ) ] )
self.assertTrue(
info.startswith( "shader \"{0}\"".format( os.path.basename( self.__osoFileName( oslCode ) ) ) )
)
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( self.__osoFileName( s2["o"] ), self.__osoFileName( s["o"] ) )
def testUndo( self ) :
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
f1 = self.__osoFileName( s["o"] )
with Gaffer.UndoScope( s ) :
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
f2 = self.__osoFileName( s["o"] )
with Gaffer.UndoScope( s ) :
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
f3 = self.__osoFileName( s["o"] )
s.undo()
self.assertEqual( self.__osoFileName( s["o"] ), f2 )
s.undo()
self.assertEqual( self.__osoFileName( s["o"] ), f1 )
s.redo()
self.assertEqual( self.__osoFileName( s["o"] ), f2 )
s.redo()
self.assertEqual( self.__osoFileName( s["o"] ), f3 )
def testSource( self ) :
# Make a shader using the OSLCode node.
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["code"].setValue( "o = i * color( u, v, 0 );")
# Export it to a .osl file and compile it.
oslFileName = os.path.join( self.temporaryDirectory(), "test.osl" )
with open( oslFileName, "w" ) as f :
f.write( oslCode.source( "test") )
shader = self.compileShader( oslFileName )
# Load that onto an OSLShader and check that
# it matches.
oslShader = GafferOSL.OSLShader()
oslShader.loadShader( shader )
self.assertEqual( oslShader["parameters"].keys(), oslCode["parameters"].keys() )
self.assertEqual( oslShader["out"].keys(), oslCode["out"].keys() )
for p in oslShader["parameters"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["parameters"][p.getName()] ) )
for p in oslShader["out"].children() :
p.setFlags( Gaffer.Plug.Flags.Dynamic, True )
self.assertEqual( repr( p ), repr( oslCode["out"][p.getName()] ) )
def testSourceUsesRequestedName( self ) :
oslCode = GafferOSL.OSLCode()
source = oslCode.source( "test" )
self.assertTrue( "shader test" in source )
def testParameterRenaming( self ) :
oslCode = GafferOSL.OSLCode()
oslCode["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
oslCode["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__assertError( oslCode, oslCode["code"].setValue, "o = in" )
cs = GafferTest.CapturingSlot( oslCode.plugDirtiedSignal() )
self.__assertNoError( oslCode, oslCode["parameters"]["i"].setName, "in" )
self.assertTrue( oslCode["out"] in [ x[0] for x in cs ] )
self.__assertError( oslCode, oslCode["parameters"]["in"].setName, "i" )
def testMoveCodeDirectory( self ) :
oslCodeDir = os.environ.get( "GAFFEROSL_CODE_DIRECTORY" )
if oslCodeDir :
self.addCleanup( os.environ.__setitem__, "GAFFEROSL_CODE_DIRECTORY", oslCodeDir )
else :
self.addCleanup( os.environ.__delitem__, "GAFFEROSL_CODE_DIRECTORY" )
# Make an OSL shader in a specific code directory.
os.environ["GAFFEROSL_CODE_DIRECTORY"] = os.path.join( self.temporaryDirectory(), "codeDirectoryA" )
s = Gaffer.ScriptNode()
s["o"] = GafferOSL.OSLCode()
s["o"]["parameters"]["i"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["out"]["o"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["o"]["code"].setValue( "o = i * color( u, v, 0 );")
self.assertTrue( self.__osoFileName( s["o"] ).startswith( os.environ["GAFFEROSL_CODE_DIRECTORY"] ) )
# Now simulate the loading of that script in a different environment,
# with a different code directory.
ss = s.serialise()
shutil.rmtree( os.environ["GAFFEROSL_CODE_DIRECTORY"] )
os.environ["GAFFEROSL_CODE_DIRECTORY"] = os.path.join( self.temporaryDirectory(), "codeDirectoryB" )
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertTrue( self.__osoFileName( s2["o"] ).startswith( os.environ["GAFFEROSL_CODE_DIRECTORY"] ) )
def __osoFileName( self, oslCode ) :
# Right now we could get this information by
# getting the value directly from the "name" plug
# on the OSLCode node, but we're getting it from
# the computed shader instead, in the hope that
# one day we can refactor things so that it's the
# generation of the shader network that also generates
# the file on disk. It might be that the
# `GafferScene::Shader` base class shouldn't even
# mandate the existence of "name" and "type" plugs.
return oslCode.attributes()["osl:shader"].outputShader().name
def __assertError( self, oslCode, fn, *args, **kw ) :
cs = GafferTest.CapturingSlot( oslCode.errorSignal() )
fn( *args, **kw )
self.__osoFileName( oslCode )
self.assertEqual( len( cs ), 1 )
def __assertNoError( self, oslCode, fn, *args, **kw ) :
cs = GafferTest.CapturingSlot( oslCode.errorSignal() )
fn( *args, **kw )
self.__osoFileName( oslCode )
self.assertEqual( len( cs ), 0 )
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import division
import numpy as np
from scipy.integrate import odeint
from bokeh.plotting import *
def streamlines(x, y, u, v, density=1):
'''Returns streamlines of a vector flow.
* x and y are 1d arrays defining an *evenly spaced* grid.
* u and v are 2d arrays (shape [y,x]) giving velocities.
    * density controls the closeness of the streamlines; higher values give
      more, more closely spaced streamlines. (Per-axis densities are not
      supported by this implementation.)
'''
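    ## Illustrative usage (assuming u and v are shaped [len(y), len(x)]):
    ##   xs, ys = streamlines(x, y, u, v, density=1.5)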
## Set up some constants - size of the grid used.
NGX = len(x)
NGY = len(y)
## Constants used to convert between grid index coords and user coords.
DX = x[1]-x[0]
DY = y[1]-y[0]
XOFF = x[0]
YOFF = y[0]
## Now rescale velocity onto axes-coordinates
u = u / (x[-1]-x[0])
v = v / (y[-1]-y[0])
speed = np.sqrt(u*u+v*v)
## s (path length) will now be in axes-coordinates, but we must
## rescale u for integrations.
u *= NGX
v *= NGY
## Now u and v in grid-coordinates.
NBX = int(30*density)
NBY = int(30*density)
blank = np.zeros((NBY,NBX))
bx_spacing = NGX/float(NBX-1)
by_spacing = NGY/float(NBY-1)
def blank_pos(xi, yi):
return int((xi / bx_spacing) + 0.5), \
int((yi / by_spacing) + 0.5)
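    ## value_at() below performs a bilinear interpolation of array `a` at the
    ## fractional grid position (xi, yi).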
def value_at(a, xi, yi):
        # Use the builtin int; np.int was removed in recent NumPy releases.
        if isinstance(xi, np.ndarray):
            x = xi.astype(int)
            y = yi.astype(int)
        else:
            x = int(xi)
            y = int(yi)
a00 = a[y,x]
a01 = a[y,x+1]
a10 = a[y+1,x]
a11 = a[y+1,x+1]
xt = xi - x
yt = yi - y
a0 = a00*(1-xt) + a01*xt
a1 = a10*(1-xt) + a11*xt
return a0*(1-yt) + a1*yt
def rk4_integrate(x0, y0):
## This function does RK4 forward and back trajectories from
## the initial conditions, with the odd 'blank array'
## termination conditions. TODO tidy the integration loops.
def f(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return ui*dt_ds, vi*dt_ds
def g(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return -ui*dt_ds, -vi*dt_ds
check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1
bx_changes = []
by_changes = []
## Integrator function
def rk4(x0, y0, f):
ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK4
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
integrator = rk4
sf, xf_traj, yf_traj = integrator(x0, y0, f)
sb, xb_traj, yb_traj = integrator(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
## Tests to check length of traj. Remember, s in units of axes.
if len(x_traj) < 1: return None
if stotal > .2:
initxb, inityb = blank_pos(x0, y0)
blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(bx_changes, by_changes):
blank[yb, xb] = 0
return None
## A quick function for integrating trajectories if blank==0.
trajectories = []
def traj(xb, yb):
if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
return
if blank[yb, xb] == 0:
t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
            if t is not None:
trajectories.append(t)
## Now we build up the trajectory set. I've found it best to look
## for blank==0 along the edges first, and work inwards.
for indent in range((max(NBX,NBY))//2):
for xi in range(max(NBX,NBY)-2*indent):
traj(xi+indent, indent)
traj(xi+indent, NBY-1-indent)
traj(indent, xi+indent)
traj(NBX-1-indent, xi+indent)
xs = [np.array(t[0])*DX+XOFF for t in trajectories]
ys = [np.array(t[1])*DY+YOFF for t in trajectories]
return xs, ys
xx = np.linspace(-3, 3, 100)
yy = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(xx, yy)
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
theta = np.arctan(V/U)
x0 = X[::2, ::2].flatten()
y0 = Y[::2, ::2].flatten()
length = speed[::2, ::2].flatten()/40
angle = theta[::2, ::2].flatten()
x1 = x0 + length * np.cos(angle)
y1 = y0 + length * np.sin(angle)
xs, ys = streamlines(xx, yy, U.T, V.T, density=2)
cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"])
ix = ((length-length.min())/(length.max()-length.min())*5).astype('int')
colors = cm[ix]
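# The normalized segment lengths are binned into integer indices 0-5 above,
# selecting one of the six colors in `cm` for each vector segment.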
output_file("vector.html", title="vector.py example")
figure(tools="pan,wheel_zoom,box_zoom,reset,previewsave")
segment(x0, y0, x1, y1,
line_color=colors, line_width=2,
)
multi_line(xs, ys,
line_color="#ee6666", line_width=2, line_alpha=0.8,
name="vector example"
)
show() # open a browser
|
|
from argparse import ArgumentParser
from .util.compat import SafeConfigParser
import inspect
import os
import sys
from . import command
from . import util
from . import package_dir
from .util import compat
class Config(object):
"""Represent an Alembic configuration.
Within an ``env.py`` script, this is available
via the :attr:`.EnvironmentContext.config` attribute,
which in turn is available at ``alembic.context``::
from alembic import context
some_param = context.config.get_main_option("my option")
    When invoking Alembic programmatically, a new
:class:`.Config` can be created by passing
the name of an .ini file to the constructor::
from alembic.config import Config
alembic_cfg = Config("/path/to/yourapp/alembic.ini")
With a :class:`.Config` object, you can then
run Alembic commands programmatically using the directives
in :mod:`alembic.command`.
The :class:`.Config` object can also be constructed without
a filename. Values can be set programmatically, and
new sections will be created as needed::
from alembic.config import Config
alembic_cfg = Config()
alembic_cfg.set_main_option("script_location", "myapp:migrations")
alembic_cfg.set_main_option("url", "postgresql://foo/bar")
alembic_cfg.set_section_option("mysection", "foo", "bar")
.. warning::
When using programmatic configuration, make sure the
``env.py`` file in use is compatible with the target configuration;
including that the call to Python ``logging.fileConfig()`` is
omitted if the programmatic configuration doesn't actually include
logging directives.
For passing non-string values to environments, such as connections and
engines, use the :attr:`.Config.attributes` dictionary::
with engine.begin() as connection:
alembic_cfg.attributes['connection'] = connection
command.upgrade(alembic_cfg, "head")
:param file_: name of the .ini file to open.
:param ini_section: name of the main Alembic section within the
.ini file
:param output_buffer: optional file-like input buffer which
will be passed to the :class:`.MigrationContext` - used to redirect
the output of "offline generation" when using Alembic programmatically.
:param stdout: buffer where the "print" output of commands will be sent.
Defaults to ``sys.stdout``.
.. versionadded:: 0.4
:param config_args: A dictionary of keys and values that will be used
for substitution in the alembic config file. The dictionary as given
is **copied** to a new one, stored locally as the attribute
``.config_args``. When the :attr:`.Config.file_config` attribute is
first invoked, the replacement variable ``here`` will be added to this
dictionary before the dictionary is passed to ``SafeConfigParser()``
to parse the .ini file.
.. versionadded:: 0.7.0
:param attributes: optional dictionary of arbitrary Python keys/values,
which will be populated into the :attr:`.Config.attributes` dictionary.
.. versionadded:: 0.7.5
.. seealso::
:ref:`connection_sharing`
"""
def __init__(self, file_=None, ini_section='alembic', output_buffer=None,
stdout=sys.stdout, cmd_opts=None,
config_args=util.immutabledict(), attributes=None):
"""Construct a new :class:`.Config`
"""
self.config_file_name = file_
self.config_ini_section = ini_section
self.output_buffer = output_buffer
self.stdout = stdout
self.cmd_opts = cmd_opts
self.config_args = dict(config_args)
if attributes:
self.attributes.update(attributes)
cmd_opts = None
"""The command-line options passed to the ``alembic`` script.
Within an ``env.py`` script this can be accessed via the
:attr:`.EnvironmentContext.config` attribute.
.. versionadded:: 0.6.0
.. seealso::
:meth:`.EnvironmentContext.get_x_argument`
"""
config_file_name = None
"""Filesystem path to the .ini file in use."""
config_ini_section = None
"""Name of the config file section to read basic configuration
from. Defaults to ``alembic``, that is the ``[alembic]`` section
of the .ini file. This value is modified using the ``-n/--name``
    option to the Alembic runner.
"""
@util.memoized_property
def attributes(self):
"""A Python dictionary for storage of additional state.
This is a utility dictionary which can include not just strings but
engines, connections, schema objects, or anything else.
Use this to pass objects into an env.py script, such as passing
a :class:`sqlalchemy.engine.base.Connection` when calling
commands from :mod:`alembic.command` programmatically.
.. versionadded:: 0.7.5
.. seealso::
:ref:`connection_sharing`
:paramref:`.Config.attributes`
"""
return {}
def print_stdout(self, text, *arg):
"""Render a message to standard out."""
util.write_outstream(
self.stdout,
(compat.text_type(text) % arg),
"\n"
)
@util.memoized_property
def file_config(self):
"""Return the underlying ``ConfigParser`` object.
Direct access to the .ini file is available here,
though the :meth:`.Config.get_section` and
:meth:`.Config.get_main_option`
methods provide a possibly simpler interface.
"""
if self.config_file_name:
here = os.path.abspath(os.path.dirname(self.config_file_name))
else:
here = ""
self.config_args['here'] = here
file_config = SafeConfigParser(self.config_args)
if self.config_file_name:
file_config.read([self.config_file_name])
else:
file_config.add_section(self.config_ini_section)
return file_config
def get_template_directory(self):
"""Return the directory where Alembic setup templates are found.
This method is used by the alembic ``init`` and ``list_templates``
commands.
"""
return os.path.join(package_dir, 'templates')
def get_section(self, name):
"""Return all the configuration options from a given .ini file section
as a dictionary.
"""
return dict(self.file_config.items(name))
def set_main_option(self, name, value):
"""Set an option programmatically within the 'main' section.
This overrides whatever was in the .ini file.
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
self.set_section_option(self.config_ini_section, name, value)
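    # Illustrative (hypothetical values): because ConfigParser interpolation is
    # in effect, a literal percent sign must be doubled, e.g.
    #   alembic_cfg.set_main_option(
    #       "sqlalchemy.url", "postgresql://user:p%%40ss@host/db")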
def remove_main_option(self, name):
self.file_config.remove_option(self.config_ini_section, name)
def set_section_option(self, section, name, value):
"""Set an option programmatically within the given section.
The section is created if it doesn't exist already.
The value here will override whatever was in the .ini
file.
:param section: name of the section
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
if not self.file_config.has_section(section):
self.file_config.add_section(section)
self.file_config.set(section, name, value)
def get_section_option(self, section, name, default=None):
"""Return an option from the given section of the .ini file.
"""
if not self.file_config.has_section(section):
raise util.CommandError("No config file %r found, or file has no "
"'[%s]' section" %
(self.config_file_name, section))
if self.file_config.has_option(section, name):
return self.file_config.get(section, name)
else:
return default
def get_main_option(self, name, default=None):
"""Return an option from the 'main' section of the .ini file.
This defaults to being a key from the ``[alembic]``
section, unless the ``-n/--name`` flag were used to
indicate a different section.
"""
return self.get_section_option(self.config_ini_section, name, default)
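# A minimal usage sketch (not part of the original module): building a
# Config programmatically and handing it to a command.  ``set_main_option``
# and ``attributes`` are defined above; the .ini path, the database URL and
# the ``run_by`` key are assumptions made up purely for illustration.
def _example_programmatic_config():
    from alembic import command
    cfg = Config(file_="alembic.ini")
    cfg.set_main_option("sqlalchemy.url", "sqlite:///example.db")
    cfg.attributes["run_by"] = "example-script"  # arbitrary state visible to env.py
    command.upgrade(cfg, "head")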
class CommandLine(object):
def __init__(self, prog=None):
self._generate_args(prog)
def _generate_args(self, prog):
def add_options(parser, positional, kwargs):
kwargs_opts = {
'template': (
"-t", "--template",
dict(
default='generic',
type=str,
help="Setup template for use with 'init'"
)
),
'message': (
"-m", "--message",
dict(
type=str,
help="Message string to use with 'revision'")
),
'sql': (
"--sql",
dict(
action="store_true",
help="Don't emit SQL to database - dump to "
"standard output/file instead"
)
),
'tag': (
"--tag",
dict(
type=str,
help="Arbitrary 'tag' name - can be used by "
"custom env.py scripts.")
),
'head': (
"--head",
dict(
type=str,
help="Specify head revision or <branchname>@head "
"to base new revision on."
)
),
'splice': (
"--splice",
dict(
action="store_true",
help="Allow a non-head revision as the "
"'head' to splice onto"
)
),
'depends_on': (
"--depends-on",
dict(
action="append",
help="Specify one or more revision identifiers "
"which this revision should depend on."
)
),
'rev_id': (
"--rev-id",
dict(
type=str,
help="Specify a hardcoded revision id instead of "
"generating one"
)
),
'version_path': (
"--version-path",
dict(
type=str,
help="Specify specific path from config for "
"version file"
)
),
'branch_label': (
"--branch-label",
dict(
type=str,
help="Specify a branch label to apply to the "
"new revision"
)
),
'verbose': (
"-v", "--verbose",
dict(
action="store_true",
help="Use more verbose output"
)
),
'resolve_dependencies': (
'--resolve-dependencies',
dict(
action="store_true",
help="Treat dependency versions as down revisions"
)
),
'autogenerate': (
"--autogenerate",
dict(
action="store_true",
help="Populate revision script with candidate "
"migration operations, based on comparison "
"of database to model.")
),
'head_only': (
"--head-only",
dict(
action="store_true",
help="Deprecated. Use --verbose for "
"additional output")
),
'rev_range': (
"-r", "--rev-range",
dict(
action="store",
help="Specify a revision range; "
"format is [start]:[end]")
)
}
positional_help = {
'directory': "location of scripts directory",
'revision': "revision identifier",
'revisions': "one or more revisions, or 'heads' for all heads"
}
for arg in kwargs:
if arg in kwargs_opts:
args = kwargs_opts[arg]
args, kw = args[0:-1], args[-1]
parser.add_argument(*args, **kw)
for arg in positional:
if arg == "revisions":
subparser.add_argument(
arg, nargs='+', help=positional_help.get(arg))
else:
subparser.add_argument(arg, help=positional_help.get(arg))
parser = ArgumentParser(prog=prog)
parser.add_argument("-c", "--config",
type=str,
default="alembic.ini",
help="Alternate config file")
parser.add_argument("-n", "--name",
type=str,
default="alembic",
help="Name of section in .ini file to "
"use for Alembic config")
parser.add_argument("-x", action="append",
help="Additional arguments consumed by "
"custom env.py scripts, e.g. -x "
"setting1=somesetting -x setting2=somesetting")
parser.add_argument("--raiseerr", action="store_true",
help="Raise a full stack trace on error")
subparsers = parser.add_subparsers()
for fn in [getattr(command, n) for n in dir(command)]:
if inspect.isfunction(fn) and \
fn.__name__[0] != '_' and \
fn.__module__ == 'alembic.command':
spec = inspect.getargspec(fn)
if spec[3]:
positional = spec[0][1:-len(spec[3])]
kwarg = spec[0][-len(spec[3]):]
else:
positional = spec[0][1:]
kwarg = []
subparser = subparsers.add_parser(
fn.__name__,
help=fn.__doc__)
add_options(subparser, positional, kwarg)
subparser.set_defaults(cmd=(fn, positional, kwarg))
self.parser = parser
def run_cmd(self, config, options):
fn, positional, kwarg = options.cmd
try:
fn(config,
*[getattr(options, k) for k in positional],
**dict((k, getattr(options, k)) for k in kwarg)
)
except util.CommandError as e:
if options.raiseerr:
raise
else:
util.err(str(e))
def main(self, argv=None):
options = self.parser.parse_args(argv)
if not hasattr(options, "cmd"):
# see http://bugs.python.org/issue9253, argparse
# behavior changed incompatibly in py3.3
self.parser.error("too few arguments")
else:
cfg = Config(file_=options.config,
ini_section=options.name, cmd_opts=options)
self.run_cmd(cfg, options)
def main(argv=None, prog=None, **kwargs):
"""The console runner function for Alembic."""
CommandLine(prog=prog).main(argv=argv)
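# Illustrative sketch only: calling the console runner in-process is
# equivalent to ``alembic -c alembic.ini upgrade head`` on the command line.
# The argument list below is an assumption chosen for the example.
def _example_console_invocation():
    main(argv=["-c", "alembic.ini", "--raiseerr", "upgrade", "head"])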
if __name__ == '__main__':
main()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
Requires `pyyaml <http://pyyaml.org/>`_ to be installed.
"""
import os
import copy
import sys
from io import StringIO
import pytest
import numpy as np
from ....table import Table, Column, QTable, NdarrayMixin
from ....table.table_helpers import simple_table
from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from ....time import Time, TimeDelta
from ....tests.helper import quantity_allclose
from ....units.quantity import QuantityInfo
from ..ecsv import DELIMITERS
from ... import ascii
from .... import units as u
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
if os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 0.9',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
@pytest.mark.skipif('not HAS_YAML')
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
@pytest.mark.skipif('not HAS_YAML')
def test_write_full():
"""
    Write a full-featured table with common types and explicitly check the output
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 0.9',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
@pytest.mark.skipif('not HAS_YAML')
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
@pytest.mark.skipif('not HAS_YAML')
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
@pytest.mark.skipif('not HAS_YAML')
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table([np.arange(4).reshape(2, 2)], names=['a'])
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='ascii.ecsv')
assert 'ECSV format does not support multidimensional column' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
@pytest.mark.skipif('not HAS_YAML')
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err)
@pytest.mark.skipif('not HAS_YAML')
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5*u.km, "foo2": u.s}
t["bar"] = [7]*u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scc = sc.copy()
scc.representation = 'cartesian'
tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0])
tm2 = Time(tm, format='iso')
tm3 = Time(tm, location=el)
tm3.info.serialize_method['ecsv'] = 'jd1_jd2'
mixin_cols = {
'tm': tm,
'tm2': tm2,
'tm3': tm3,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scc': scc,
'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5'] * 2),
'q': [1, 2] * u.m,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg),
'ang': Angle([1, 2] * u.deg),
'el': el,
# 'nd': NdarrayMixin(el) # not supported yet
}
time_attrs = ['value', 'shape', 'format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'tm2': time_attrs,
'tm3': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation', 'frame.name'],
'scc': ['x', 'y', 'z', 'representation', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],
'q': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
}
@pytest.mark.skipif('not HAS_YAML')
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
@pytest.mark.skipif('not HAS_YAML')
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
serialized_names = ['ang',
'dt',
'el.x', 'el.y', 'el.z',
'lat',
'lon',
'q',
'sc.ra', 'sc.dec',
'scc.x', 'scc.y', 'scc.z',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime',
'tm', # serialize_method is formatted_value
'tm2', # serialize_method is formatted_value
'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2
'tm3.location.x', 'tm3.location.y', 'tm3.location.z']
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
    # Read as an ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == serialized_names
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_per_column(table_cls, name_col):
"""Test write/read one col at a time and do detailed validation"""
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
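# Illustrative sketch (not part of the test suite above): the minimal ECSV
# round trip that these tests exercise, assuming pyyaml is installed.  The
# column name and values are made up for the example.
def _example_ecsv_round_trip():
    t = Table({'x': [1, 2, 3]})
    out = StringIO()
    t.write(out, format='ascii.ecsv')  # YAML header plus space-delimited data
    t2 = Table.read(out.getvalue(), format='ascii.ecsv')
    assert np.all(t['x'] == t2['x'])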
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2015 Hamilton Kibbe <[email protected]> and Paulo Henrique Silva
# <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import asin
import math
from .primitives import *
from .utils import validate_coordinates, inch, metric, rotate_point
# TODO: Add support for aperture macro variables
__all__ = ['AMPrimitive', 'AMCommentPrimitive', 'AMCirclePrimitive',
'AMVectorLinePrimitive', 'AMOutlinePrimitive', 'AMPolygonPrimitive',
'AMMoirePrimitive', 'AMThermalPrimitive', 'AMCenterLinePrimitive',
'AMLowerLeftLinePrimitive', 'AMUnsupportPrimitive']
class AMPrimitive(object):
""" Aperture Macro Primitive Base Class
Parameters
----------
code : int
primitive shape code
exposure : str
        on or off. Primitives with exposure on create a solid part of
the macro aperture, and primitives with exposure off erase the
solid part created previously in the aperture macro definition.
.. note::
The erasing effect is limited to the aperture definition in
which it occurs.
Returns
-------
primitive : :class: `gerber.am_statements.AMPrimitive`
Raises
------
TypeError, ValueError
"""
def __init__(self, code, exposure=None):
VALID_CODES = (0, 1, 2, 4, 5, 6, 7, 20, 21, 22, 9999)
if not isinstance(code, int):
raise TypeError('Aperture Macro Primitive code must be an integer')
elif code not in VALID_CODES:
raise ValueError('Invalid Code. Valid codes are %s.' %
', '.join(map(str, VALID_CODES)))
if exposure is not None and exposure.lower() not in ('on', 'off'):
raise ValueError('Exposure must be either on or off')
self.code = code
self.exposure = exposure.lower() if exposure is not None else None
def to_inch(self):
        raise NotImplementedError('Subclass must implement `to_inch`')
def to_metric(self):
        raise NotImplementedError('Subclass must implement `to_metric`')
@property
def _level_polarity(self):
if self.exposure == 'off':
return 'clear'
return 'dark'
def to_primitive(self, units):
""" Return a Primitive instance based on the specified macro params.
"""
print('Rendering {}s is not supported yet.'.format(str(self.__class__)))
def __eq__(self, other):
return self.__dict__ == other.__dict__
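# Illustrative sketch only: how the exposure flag of AMPrimitive maps onto
# the level polarity used for rendering.  Instantiating the base class with
# code 0 and an explicit exposure is a made-up example; in practice the
# concrete subclasses below are used.
def _example_exposure_polarity():
    assert AMPrimitive(0, exposure='on')._level_polarity == 'dark'
    assert AMPrimitive(0, exposure='off')._level_polarity == 'clear'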
class AMCommentPrimitive(AMPrimitive):
""" Aperture Macro Comment primitive. Code 0
The comment primitive has no image meaning. It is used to include human-
readable comments into the AM command.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.1:** Comment, primitive code 0
Parameters
----------
code : int
Aperture Macro primitive code. 0 Indicates an AMCommentPrimitive
comment : str
The comment as a string.
Returns
-------
CommentPrimitive : :class:`gerber.am_statements.AMCommentPrimitive`
An Initialized AMCommentPrimitive
Raises
------
ValueError
"""
@classmethod
def from_gerber(cls, primitive):
primitive = primitive.strip()
code = int(primitive[0])
comment = primitive[1:]
return cls(code, comment)
def __init__(self, code, comment):
if code != 0:
raise ValueError('Not a valid Aperture Macro Comment statement')
super(AMCommentPrimitive, self).__init__(code)
self.comment = comment.strip(' *')
def to_inch(self):
pass
def to_metric(self):
pass
def to_gerber(self, settings=None):
return '0 %s *' % self.comment
def to_primitive(self, units):
"""
        Returns None - has no primitive representation
"""
return None
def __str__(self):
return '<Aperture Macro Comment: %s>' % self.comment
class AMCirclePrimitive(AMPrimitive):
""" Aperture macro Circle primitive. Code 1
A circle primitive is defined by its center point and diameter.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.2:** Circle, primitive code 1
Parameters
----------
code : int
Circle Primitive code. Must be 1
exposure : string
'on' or 'off'
diameter : float
Circle diameter
position : tuple (<float>, <float>)
Position of the circle relative to the macro origin
Returns
-------
CirclePrimitive : :class:`gerber.am_statements.AMCirclePrimitive`
An initialized AMCirclePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(',')
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
diameter = float(modifiers[2])
position = (float(modifiers[3]), float(modifiers[4]))
return cls(code, exposure, diameter, position)
@classmethod
def from_primitive(cls, primitive):
return cls(1, 'on', primitive.diameter, primitive.position)
def __init__(self, code, exposure, diameter, position):
validate_coordinates(position)
if code != 1:
raise ValueError('CirclePrimitive code is 1')
super(AMCirclePrimitive, self).__init__(code, exposure)
self.diameter = diameter
self.position = position
def to_inch(self):
self.diameter = inch(self.diameter)
self.position = tuple([inch(x) for x in self.position])
def to_metric(self):
self.diameter = metric(self.diameter)
self.position = tuple([metric(x) for x in self.position])
def to_gerber(self, settings=None):
data = dict(code=self.code,
                    exposure='1' if self.exposure == 'on' else '0',
diameter=self.diameter,
x=self.position[0],
y=self.position[1])
return '{code},{exposure},{diameter},{x},{y}*'.format(**data)
def to_primitive(self, units):
return Circle((self.position), self.diameter, units=units, level_polarity=self._level_polarity)
class AMVectorLinePrimitive(AMPrimitive):
""" Aperture Macro Vector Line primitive. Code 2 or 20.
A vector line is a rectangle defined by its line width, start, and end
points. The line ends are rectangular.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.3:** Vector Line, primitive code 2 or 20.
Parameters
----------
code : int
Vector Line Primitive code. Must be either 2 or 20.
exposure : string
'on' or 'off'
width : float
Line width
start : tuple (<float>, <float>)
coordinate of line start point
end : tuple (<float>, <float>)
coordinate of line end point
rotation : float
Line rotation about the origin.
Returns
-------
LinePrimitive : :class:`gerber.am_statements.AMVectorLinePrimitive`
An initialized AMVectorLinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
return cls(2, 'on', primitive.aperture.width, primitive.start, primitive.end, 0)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(',')
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
start = (float(modifiers[3]), float(modifiers[4]))
end = (float(modifiers[5]), float(modifiers[6]))
rotation = float(modifiers[7])
return cls(code, exposure, width, start, end, rotation)
def __init__(self, code, exposure, width, start, end, rotation):
validate_coordinates(start)
validate_coordinates(end)
if code not in (2, 20):
raise ValueError('VectorLinePrimitive codes are 2 or 20')
super(AMVectorLinePrimitive, self).__init__(code, exposure)
self.width = width
self.start = start
self.end = end
self.rotation = rotation
def to_inch(self):
self.width = inch(self.width)
self.start = tuple([inch(x) for x in self.start])
self.end = tuple([inch(x) for x in self.end])
def to_metric(self):
self.width = metric(self.width)
self.start = tuple([metric(x) for x in self.start])
self.end = tuple([metric(x) for x in self.end])
def to_gerber(self, settings=None):
fmtstr = '{code},{exp},{width},{startx},{starty},{endx},{endy},{rotation}*'
data = dict(code=self.code,
exp=1 if self.exposure == 'on' else 0,
width=self.width,
startx=self.start[0],
starty=self.start[1],
endx=self.end[0],
endy=self.end[1],
rotation=self.rotation)
return fmtstr.format(**data)
def to_primitive(self, units):
"""
Convert this to a primitive. We use the Outline to represent this (instead of Line)
because the behaviour of the end caps is different for aperture macros compared to Lines
when rotated.
"""
# Use a line to generate our vertices easily
line = Line(self.start, self.end, Rectangle(None, self.width, self.width))
vertices = line.vertices
aperture = Circle((0, 0), 0)
lines = []
prev_point = rotate_point(vertices[-1], self.rotation, (0, 0))
for point in vertices:
cur_point = rotate_point(point, self.rotation, (0, 0))
            lines.append(Line(prev_point, cur_point, aperture))
            prev_point = cur_point
return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMOutlinePrimitive(AMPrimitive):
""" Aperture Macro Outline primitive. Code 4.
An outline primitive is an area enclosed by an n-point polygon defined by
its start point and n subsequent points. The outline must be closed, i.e.
the last point must be equal to the start point. Self intersecting
outlines are not allowed.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.6:** Outline, primitive code 4.
Parameters
----------
code : int
        OutlinePrimitive code. Must be 4.
exposure : string
'on' or 'off'
start_point : tuple (<float>, <float>)
coordinate of outline start point
points : list of tuples (<float>, <float>)
coordinates of subsequent points
rotation : float
outline rotation about the origin.
Returns
-------
    OutlinePrimitive : :class:`gerber.am_statements.AMOutlinePrimitive`
An initialized AMOutlinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
start_point = (round(primitive.primitives[0].start[0], 6), round(primitive.primitives[0].start[1], 6))
points = []
for prim in primitive.primitives:
points.append((round(prim.end[0], 6), round(prim.end[1], 6)))
rotation = 0.0
return cls(4, 'on', start_point, points, rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = "on" if float(modifiers[1]) == 1 else "off"
n = int(float(modifiers[2]))
start_point = (float(modifiers[3]), float(modifiers[4]))
points = []
for i in range(n):
points.append((float(modifiers[5 + i * 2]),
float(modifiers[5 + i * 2 + 1])))
rotation = float(modifiers[-1])
return cls(code, exposure, start_point, points, rotation)
def __init__(self, code, exposure, start_point, points, rotation):
""" Initialize AMOutlinePrimitive
"""
validate_coordinates(start_point)
for point in points:
validate_coordinates(point)
if code != 4:
raise ValueError('OutlinePrimitive code is 4')
super(AMOutlinePrimitive, self).__init__(code, exposure)
self.start_point = start_point
if points[-1] != start_point:
raise ValueError('OutlinePrimitive must be closed')
self.points = points
self.rotation = rotation
def to_inch(self):
self.start_point = tuple([inch(x) for x in self.start_point])
self.points = tuple([(inch(x), inch(y)) for x, y in self.points])
def to_metric(self):
self.start_point = tuple([metric(x) for x in self.start_point])
self.points = tuple([(metric(x), metric(y)) for x, y in self.points])
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure="1" if self.exposure == "on" else "0",
n_points=len(self.points),
start_point="%.6g,%.6g" % self.start_point,
points=",\n".join(["%.6g,%.6g" % point for point in self.points]),
rotation=str(self.rotation)
)
return "{code},{exposure},{n_points},{start_point},{points},{rotation}*".format(**data)
def to_primitive(self, units):
"""
Convert this to a drawable primitive. This uses the Outline instead of Line
primitive to handle differences in end caps when rotated.
"""
lines = []
prev_point = rotate_point(self.start_point, self.rotation)
for point in self.points:
cur_point = rotate_point(point, self.rotation)
lines.append(Line(prev_point, cur_point, Circle((0,0), 0)))
prev_point = cur_point
if lines[0].start != lines[-1].end:
raise ValueError('Outline must be closed')
return Outline(lines, units=units, level_polarity=self._level_polarity)
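# Illustrative sketch only: the modifier layout that AMOutlinePrimitive.from_gerber
# expects is "code,exposure,n,start_x,start_y,x1,y1,...,xn,yn,rotation", where the
# last point must close back onto the start point.  The triangle below is a
# made-up example.
def _example_outline_round_trip():
    outline = AMOutlinePrimitive.from_gerber('4,1,3,0,0,1,0,0,1,0,0,0*')
    assert outline.start_point == (0.0, 0.0)
    assert outline.points[-1] == outline.start_point
    return outline.to_gerber()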
class AMPolygonPrimitive(AMPrimitive):
""" Aperture Macro Polygon primitive. Code 5.
A polygon primitive is a regular polygon defined by the number of
vertices, the center point, and the diameter of the circumscribed circle.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.8:** Polygon, primitive code 5.
Parameters
----------
code : int
PolygonPrimitive code. Must be 5.
exposure : string
'on' or 'off'
vertices : int, 3 <= vertices <= 12
Number of vertices
position : tuple (<float>, <float>)
X and Y coordinates of polygon center
diameter : float
diameter of circumscribed circle.
rotation : float
polygon rotation about the origin.
Returns
-------
PolygonPrimitive : :class:`gerber.am_statements.AMPolygonPrimitive`
An initialized AMPolygonPrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
return cls(5, 'on', primitive.sides, primitive.position, primitive.diameter, primitive.rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = "on" if float(modifiers[1]) == 1 else "off"
vertices = int(float(modifiers[2]))
position = (float(modifiers[3]), float(modifiers[4]))
try:
diameter = float(modifiers[5])
        except (IndexError, ValueError):
diameter = 0
rotation = float(modifiers[6])
return cls(code, exposure, vertices, position, diameter, rotation)
def __init__(self, code, exposure, vertices, position, diameter, rotation):
""" Initialize AMPolygonPrimitive
"""
if code != 5:
raise ValueError('PolygonPrimitive code is 5')
super(AMPolygonPrimitive, self).__init__(code, exposure)
if vertices < 3 or vertices > 12:
raise ValueError('Number of vertices must be between 3 and 12')
self.vertices = vertices
validate_coordinates(position)
self.position = position
self.diameter = diameter
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.diameter = inch(self.diameter)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.diameter = metric(self.diameter)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure="1" if self.exposure == "on" else "0",
vertices=self.vertices,
position="%.4g,%.4g" % self.position,
diameter='%.4g' % self.diameter,
rotation=str(self.rotation)
)
fmt = "{code},{exposure},{vertices},{position},{diameter},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
return Polygon(self.position, self.vertices, self.diameter / 2.0, 0, rotation=self.rotation, units=units, level_polarity=self._level_polarity)
class AMMoirePrimitive(AMPrimitive):
""" Aperture Macro Moire primitive. Code 6.
The moire primitive is a cross hair centered on concentric rings (annuli).
Exposure is always on.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.9:** Moire, primitive code 6.
Parameters
----------
code : int
Moire Primitive code. Must be 6.
position : tuple (<float>, <float>)
X and Y coordinates of moire center
diameter : float
outer diameter of outer ring.
ring_thickness : float
thickness of concentric rings.
gap : float
gap between concentric rings.
max_rings : float
maximum number of rings
crosshair_thickness : float
thickness of crosshairs
crosshair_length : float
length of crosshairs
rotation : float
moire rotation about the origin.
Returns
-------
MoirePrimitive : :class:`gerber.am_statements.AMMoirePrimitive`
An initialized AMMoirePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
position = (float(modifiers[1]), float(modifiers[2]))
diameter = float(modifiers[3])
ring_thickness = float(modifiers[4])
gap = float(modifiers[5])
max_rings = int(float(modifiers[6]))
crosshair_thickness = float(modifiers[7])
crosshair_length = float(modifiers[8])
rotation = float(modifiers[9])
return cls(code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation)
def __init__(self, code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation):
""" Initialize AMoirePrimitive
"""
if code != 6:
raise ValueError('MoirePrimitive code is 6')
super(AMMoirePrimitive, self).__init__(code, 'on')
validate_coordinates(position)
self.position = position
self.diameter = diameter
self.ring_thickness = ring_thickness
self.gap = gap
self.max_rings = max_rings
self.crosshair_thickness = crosshair_thickness
self.crosshair_length = crosshair_length
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.diameter = inch(self.diameter)
self.ring_thickness = inch(self.ring_thickness)
self.gap = inch(self.gap)
self.crosshair_thickness = inch(self.crosshair_thickness)
self.crosshair_length = inch(self.crosshair_length)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.diameter = metric(self.diameter)
self.ring_thickness = metric(self.ring_thickness)
self.gap = metric(self.gap)
self.crosshair_thickness = metric(self.crosshair_thickness)
self.crosshair_length = metric(self.crosshair_length)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
position="%.4g,%.4g" % self.position,
diameter=self.diameter,
ring_thickness=self.ring_thickness,
gap=self.gap,
max_rings=self.max_rings,
crosshair_thickness=self.crosshair_thickness,
crosshair_length=self.crosshair_length,
rotation=self.rotation
)
fmt = "{code},{position},{diameter},{ring_thickness},{gap},{max_rings},{crosshair_thickness},{crosshair_length},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
#raise NotImplementedError()
return None
class AMThermalPrimitive(AMPrimitive):
""" Aperture Macro Thermal primitive. Code 7.
The thermal primitive is a ring (annulus) interrupted by four gaps.
Exposure is always on.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.10:** Thermal, primitive code 7.
Parameters
----------
code : int
Thermal Primitive code. Must be 7.
position : tuple (<float>, <float>)
X and Y coordinates of thermal center
outer_diameter : float
outer diameter of thermal.
inner_diameter : float
inner diameter of thermal.
gap : float
gap thickness
rotation : float
thermal rotation about the origin.
Returns
-------
ThermalPrimitive : :class:`gerber.am_statements.AMThermalPrimitive`
An initialized AMThermalPrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
position = (float(modifiers[1]), float(modifiers[2]))
outer_diameter = float(modifiers[3])
inner_diameter = float(modifiers[4])
gap = float(modifiers[5])
rotation = float(modifiers[6])
return cls(code, position, outer_diameter, inner_diameter, gap, rotation)
def __init__(self, code, position, outer_diameter, inner_diameter, gap, rotation):
if code != 7:
raise ValueError('ThermalPrimitive code is 7')
super(AMThermalPrimitive, self).__init__(code, 'on')
validate_coordinates(position)
self.position = position
self.outer_diameter = outer_diameter
self.inner_diameter = inner_diameter
self.gap = gap
self.rotation = rotation
def to_inch(self):
self.position = tuple([inch(x) for x in self.position])
self.outer_diameter = inch(self.outer_diameter)
self.inner_diameter = inch(self.inner_diameter)
self.gap = inch(self.gap)
def to_metric(self):
self.position = tuple([metric(x) for x in self.position])
self.outer_diameter = metric(self.outer_diameter)
self.inner_diameter = metric(self.inner_diameter)
self.gap = metric(self.gap)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
position="%.4g,%.4g" % self.position,
outer_diameter=self.outer_diameter,
inner_diameter=self.inner_diameter,
gap=self.gap,
rotation=self.rotation
)
fmt = "{code},{position},{outer_diameter},{inner_diameter},{gap},{rotation}*"
return fmt.format(**data)
def _approximate_arc_cw(self, start_angle, end_angle, radius, center):
"""
Get an arc as a series of points
Parameters
----------
start_angle : The start angle in radians
end_angle : The end angle in radians
        radius : Radius of the arc
center : The center point of the arc (x, y) tuple
Returns
-------
array of point tuples
"""
# The total sweep
sweep_angle = end_angle - start_angle
num_steps = 10
angle_step = sweep_angle / num_steps
points = []
for i in range(num_steps + 1):
current_angle = start_angle + (angle_step * i)
nextx = (center[0] + math.cos(current_angle) * radius)
nexty = (center[1] + math.sin(current_angle) * radius)
points.append((nextx, nexty))
return points
def to_primitive(self, units):
# We start with calculating the top right section, then duplicate it
inner_radius = self.inner_diameter / 2.0
outer_radius = self.outer_diameter / 2.0
# Calculate the start angle relative to the horizontal axis
inner_offset_angle = asin(self.gap / 2.0 / inner_radius)
outer_offset_angle = asin(self.gap / 2.0 / outer_radius)
rotation_rad = math.radians(self.rotation)
inner_start_angle = inner_offset_angle + rotation_rad
inner_end_angle = math.pi / 2 - inner_offset_angle + rotation_rad
outer_start_angle = outer_offset_angle + rotation_rad
outer_end_angle = math.pi / 2 - outer_offset_angle + rotation_rad
outlines = []
aperture = Circle((0, 0), 0)
points = (self._approximate_arc_cw(inner_start_angle, inner_end_angle, inner_radius, self.position)
+ list(reversed(self._approximate_arc_cw(outer_start_angle, outer_end_angle, outer_radius, self.position))))
# Add in the last point since outlines should be closed
points.append(points[0])
# There are four outlines at rotated sections
for rotation in [0, 90.0, 180.0, 270.0]:
lines = []
prev_point = rotate_point(points[0], rotation, self.position)
for point in points[1:]:
cur_point = rotate_point(point, rotation, self.position)
lines.append(Line(prev_point, cur_point, aperture))
prev_point = cur_point
outlines.append(Outline(lines, units=units, level_polarity=self._level_polarity))
return outlines
class AMCenterLinePrimitive(AMPrimitive):
""" Aperture Macro Center Line primitive. Code 21.
The center line primitive is a rectangle defined by its width, height, and center point.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.4:** Center Line, primitive code 21.
Parameters
----------
code : int
Center Line Primitive code. Must be 21.
exposure : str
'on' or 'off'
width : float
Width of rectangle
height : float
Height of rectangle
center : tuple (<float>, <float>)
X and Y coordinates of line center
rotation : float
rectangle rotation about its center.
Returns
-------
CenterLinePrimitive : :class:`gerber.am_statements.AMCenterLinePrimitive`
An initialized AMCenterLinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_primitive(cls, primitive):
width = primitive.width
height = primitive.height
center = primitive.position
rotation = math.degrees(primitive.rotation)
return cls(21, 'on', width, height, center, rotation)
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
height = float(modifiers[3])
center = (float(modifiers[4]), float(modifiers[5]))
rotation = float(modifiers[6])
return cls(code, exposure, width, height, center, rotation)
def __init__(self, code, exposure, width, height, center, rotation):
if code != 21:
raise ValueError('CenterLinePrimitive code is 21')
super(AMCenterLinePrimitive, self).__init__(code, exposure)
self.width = width
self.height = height
validate_coordinates(center)
self.center = center
self.rotation = rotation
def to_inch(self):
self.center = tuple([inch(x) for x in self.center])
self.width = inch(self.width)
self.height = inch(self.height)
def to_metric(self):
self.center = tuple([metric(x) for x in self.center])
self.width = metric(self.width)
self.height = metric(self.height)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure = '1' if self.exposure == 'on' else '0',
width = self.width,
height = self.height,
center="%.4g,%.4g" % self.center,
rotation=self.rotation
)
fmt = "{code},{exposure},{width},{height},{center},{rotation}*"
return fmt.format(**data)
def to_primitive(self, units):
x = self.center[0]
y = self.center[1]
half_width = self.width / 2.0
half_height = self.height / 2.0
points = []
points.append((x - half_width, y + half_height))
points.append((x - half_width, y - half_height))
points.append((x + half_width, y - half_height))
points.append((x + half_width, y + half_height))
aperture = Circle((0, 0), 0)
lines = []
prev_point = rotate_point(points[3], self.rotation, self.center)
for point in points:
cur_point = rotate_point(point, self.rotation, self.center)
            lines.append(Line(prev_point, cur_point, aperture))
            prev_point = cur_point
return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMLowerLeftLinePrimitive(AMPrimitive):
""" Aperture Macro Lower Left Line primitive. Code 22.
The lower left line primitive is a rectangle defined by its width, height, and the lower left point.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.5:** Lower Left Line, primitive code 22.
Parameters
----------
code : int
        Lower Left Line Primitive code. Must be 22.
exposure : str
'on' or 'off'
width : float
Width of rectangle
height : float
Height of rectangle
lower_left : tuple (<float>, <float>)
X and Y coordinates of lower left corner
rotation : float
rectangle rotation about its origin.
Returns
-------
LowerLeftLinePrimitive : :class:`gerber.am_statements.AMLowerLeftLinePrimitive`
An initialized AMLowerLeftLinePrimitive
Raises
------
ValueError, TypeError
"""
@classmethod
def from_gerber(cls, primitive):
modifiers = primitive.strip(' *').split(",")
code = int(modifiers[0])
exposure = 'on' if float(modifiers[1]) == 1 else 'off'
width = float(modifiers[2])
height = float(modifiers[3])
lower_left = (float(modifiers[4]), float(modifiers[5]))
rotation = float(modifiers[6])
return cls(code, exposure, width, height, lower_left, rotation)
def __init__(self, code, exposure, width, height, lower_left, rotation):
if code != 22:
raise ValueError('LowerLeftLinePrimitive code is 22')
        super(AMLowerLeftLinePrimitive, self).__init__(code, exposure)
self.width = width
self.height = height
validate_coordinates(lower_left)
self.lower_left = lower_left
self.rotation = rotation
def to_inch(self):
self.lower_left = tuple([inch(x) for x in self.lower_left])
self.width = inch(self.width)
self.height = inch(self.height)
def to_metric(self):
self.lower_left = tuple([metric(x) for x in self.lower_left])
self.width = metric(self.width)
self.height = metric(self.height)
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure = '1' if self.exposure == 'on' else '0',
width = self.width,
height = self.height,
lower_left="%.4g,%.4g" % self.lower_left,
rotation=self.rotation
)
fmt = "{code},{exposure},{width},{height},{lower_left},{rotation}*"
return fmt.format(**data)
class AMUnsupportPrimitive(AMPrimitive):
@classmethod
def from_gerber(cls, primitive):
return cls(primitive)
def __init__(self, primitive):
super(AMUnsupportPrimitive, self).__init__(9999)
self.primitive = primitive
def to_inch(self):
pass
def to_metric(self):
pass
def to_gerber(self, settings=None):
return self.primitive
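# Illustrative sketch only: round-tripping a circle macro primitive through
# from_gerber()/to_gerber().  The modifier string "1,1,1.5,0,0*" is a made-up
# code-1 primitive: exposure on, diameter 1.5, centered on the macro origin.
def _example_circle_round_trip():
    circ = AMCirclePrimitive.from_gerber('1,1,1.5,0,0*')
    assert circ.exposure == 'on' and circ.diameter == 1.5
    return circ.to_gerber()  # e.g. '1,1,1.5,0.0,0.0*'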
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import uuid
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute import servers
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import glance
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
from nova.tests import matchers
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
def return_server_not_found(*arg, **kwarg):
raise exception.NotFound()
def instance_update(context, instance_uuid, kwargs):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
return (inst, inst)
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
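# Illustrative sketch only (not a test): MockSetAdminPassword stands in for
# compute_api.API.set_admin_password and simply records the instance uuid and
# password it was called with; the values below are made up for the example.
def _example_mock_set_admin_password():
    mock_method = MockSetAdminPassword()
    mock_method(None, {'uuid': FAKE_UUID}, 'new-pass')
    assert mock_method.instance_id == FAKE_UUID
    assert mock_method.password == 'new-pass'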
class ServerActionsControllerTest(test.TestCase):
def setUp(self):
super(ServerActionsControllerTest, self).setUp()
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update)
fakes.stub_out_glance(self.stubs)
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fake.stub_out_image_service(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = importutils.import_object(service_class)
self.sent_to_glance = {}
fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True)
self.uuid = FAKE_UUID
self.url = '/v2/fake/servers/%s/action' % self.uuid
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
self.controller = servers.Controller()
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
        # note, the mock still contains the password.
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_not_a_string(self):
body = {'changePassword': {'adminPass': 1234}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_empty_string(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': ''}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '')
def test_server_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_not_found(self):
self.stubs.Set(db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
req, str(uuid.uuid4()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
CONF.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_uuid,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_href,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue("adminPass" not in body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_rebuild, req,
FAKE_UUID, body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertTrue('personality' not in body['server'])
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue('adminPass' not in body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
update = self.mox.CreateMockAnything()
self.stubs.Set(compute_api.API, 'update', update)
req = fakes.HTTPRequest.blank(self.url)
context = req.environ['nova.context']
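# Record the expected compute_api.API.update call on the mock (mox record
# phase); ReplayAll() below switches to replay mode, so the controller's
# actual call must match the arguments recorded here.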
update(context, mox.IgnoreArg(),
image_ref=self._image_href,
kernel_id="", ramdisk_id="",
task_state=task_states.REBUILDING,
expected_task_state=None,
progress=0, **attributes).AndReturn(
fakes.stub_instance(1, host='fake_host'))
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
def test_rebuild_when_kernel_not_exists(self):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_proper_kernel_ram(self):
instance_meta = {'kernel_id': None, 'ramdisk_id': None}
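# fake_show is stubbed in for compute_api.API.update below; it records the
# kernel_id and ramdisk_id keyword arguments the controller passes through
# so they can be asserted at the end of the test.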
def fake_show(*args, **kwargs):
instance_meta['kernel_id'] = kwargs.get('kernel_id')
instance_meta['ramdisk_id'] = kwargs.get('ramdisk_id')
inst = fakes.stub_instance(INSTANCE_IDS[FAKE_UUID],
host='fake_host')
return inst
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
self.stubs.Set(compute_api.API, 'update', fake_show)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(instance_meta['kernel_id'], 1)
self.assertEqual(instance_meta['ramdisk_id'], 2)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(compute_api.API, 'resize', resize_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_resize(req, FAKE_UUID, body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.stubs.Set(compute_api.API, 'get', return_server_not_found)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(exception.TooManyInstances,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'confirm_resize',
confirm_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'confirm_resize',
fake_confirm_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'revert_resize',
revert_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_revert_resize,
req, "bad_server_id", body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_revert_resize(req, FAKE_UUID, body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'revert_resize',
fake_revert_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def _do_test_create_volume_backed_image(self, extra_properties):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
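# e.g. _fake_id('a') -> 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa', a uuid-like
# placeholder built by repeating the input character 8-4-4-12 times.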
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_properties:
body['createImage']['metadata'] = extra_properties
image_service = glance.get_default_image_service()
bdm = [dict(volume_id=_fake_id('a'),
volume_size=1,
device_name='vda',
delete_on_termination=False)]
props = dict(kernel_id=_fake_id('b'),
ramdisk_id=_fake_id('c'),
root_device_name='/dev/vda',
block_device_mapping=bdm)
original_image = dict(properties=props,
container_format='ami',
status='active',
is_public=True)
image_service.create(None, original_image)
def fake_block_device_mapping_get_all_by_instance(context, inst_id):
return [dict(volume_id=_fake_id('a'),
virtual_name=None,
volume_size=1,
device_name='vda',
snapshot_id=1,
delete_on_termination=False,
no_device=None)]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref=original_image['id'],
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
image_id = location.replace('http://localhost/v2/fake/images/', '')
image = image_service.show(None, image_id)
self.assertEquals(image['name'], 'snapshot_of_volume_backed')
properties = image['properties']
self.assertEquals(properties['kernel_id'], _fake_id('b'))
self.assertEquals(properties['ramdisk_id'], _fake_id('c'))
self.assertEquals(properties['root_device_name'], '/dev/vda')
bdms = properties['block_device_mapping']
self.assertEquals(len(bdms), 1)
self.assertEquals(bdms[0]['device_name'], 'vda')
self.assertEquals(bdms[0]['snapshot_id'], snapshot['id'])
for k in extra_properties.keys():
self.assertEquals(properties[k], extra_properties[k])
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_locked(self):
def fake_locked(context, instance_uuid):
return {"name": "foo",
"uuid": FAKE_UUID,
"locked": True}
self.stubs.Set(db, 'instance_get_by_uuid', fake_locked)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
class TestServerActionXMLDeserializer(test.TestCase):
def setUp(self):
super(TestServerActionXMLDeserializer, self).setUp()
self.deserializer = servers.ActionDeserializer()
def test_create_image(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
},
}
self.assertEquals(request['body'], expected)
def test_create_image_with_metadata(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="key1">value1</meta>
</metadata>
</createImage>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
"metadata": {"key1": "value1"},
},
}
self.assertEquals(request['body'], expected)
def test_change_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass="1234pass"/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "1234pass",
},
}
self.assertEquals(request['body'], expected)
def test_change_pass_no_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_change_pass_empty_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass=""/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "",
},
}
self.assertEquals(request['body'], expected)
def test_reboot(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"
type="HARD"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"reboot": {
"type": "HARD",
},
}
self.assertEquals(request['body'], expected)
def test_reboot_no_type(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"
flavorRef="http://localhost/flavors/3"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"resize": {"flavorRef": "http://localhost/flavors/3"},
}
self.assertEquals(request['body'], expected)
def test_resize_no_flavor_ref(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_confirm_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<confirmResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"confirmResize": None,
}
self.assertEquals(request['body'], expected)
def test_revert_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<revertResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"revertResize": None,
}
self.assertEquals(request['body'], expected)
def test_rebuild(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost/images/1">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"name": "new-server-test",
"imageRef": "http://localhost/images/1",
"metadata": {
"My Server Name": "Apache1",
},
"personality": [
{"path": "/etc/banner.txt", "contents": "Mg=="},
],
},
}
self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"imageRef": "http://localhost/images/1",
},
}
self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_rebuild_blank_name(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"
name=""/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_corrupt_xml(self):
"""Should throw a 400 error on corrupt xml."""
self.assertRaises(
exception.MalformedRequestBody,
self.deserializer.deserialize,
utils.killer_xml_body())
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
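# '48' is the push opcode for the 72-byte (0x48) DER signature above and
# '21' pushes the 33-byte (0x21) compressed public key.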
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
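# '51' is OP_1 (push the number 1) and '00' is OP_0 (push an empty vector),
# so the script below decodes to '1 0'.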
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
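# opcode breakdown: '52' = OP_2 (m), '53' = OP_3 (n), 'ae' = OP_CHECKMULTISIG.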
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(multisig_script)))
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
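# '0320a107' pushes the three little-endian bytes 20 a1 07, i.e.
# 0x07a120 = 500,000, the CLTV lock height; 'b1' is OP_CHECKLOCKTIMEVERIFY
# and '75' is OP_DROP.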
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = bytes_to_hex_str(sha256(hex_str_to_bytes(cltv_script)))
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key + '52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is grouped with the "decodescript" tests because both exercise the same "asm" script decoding.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
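# the slice below drops the leading OP_0 ('00') from the saved multisig
# scriptSig and keeps the 0x48 push opcode plus the 72-byte DER signature
# that follows it.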
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
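# sighash byte reference: 0x01 = SIGHASH_ALL, while 0x82 is
# SIGHASH_NONE (0x02) combined with SIGHASH_ANYONECANPAY (0x80).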
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=protected-access
class TensorArray(object):
"""Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
This class is meant to be used with dynamic iteration primitives such as
`While` loops, and supports gradient back-propagation via special "flow"
control flow dependencies.
@@handle
@@flow
@@read
@@unpack
@@split
@@write
@@pack
@@concat
@@grad
"""
def __init__(self, dtype, size=None, dynamic_size=None,
tensor_array_name=None,
handle=None, flow=None, name=None):
"""Construct a new TensorArray or wrap an existing TensorArray handle.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
flow: (optional) A float `Tensor` scalar coming from an existing
TensorArray.flow.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor.
"""
if handle and tensor_array_name:
raise ValueError(
"Cannot construct with both handle and tensor_array_name")
if handle and not isinstance(handle, ops.Tensor):
raise TypeError("Handle must be a Tensor")
if handle is None and size is None:
raise ValueError("Size must be provided if handle is not provided")
if handle and size is not None:
raise ValueError("Cannot provide both a handle and size "
"at the same time")
if handle and dynamic_size is not None:
raise ValueError("Cannot provide both a handle and dynamic_size "
"at the same time")
dynamic_size = dynamic_size or False
self._dtype = dtype
with ops.op_scope([handle, size, flow], name, "TensorArray") as scope:
if handle:
self._handle = handle
else:
self._handle = gen_data_flow_ops._tensor_array(
dtype=dtype, size=size, dynamic_size=dynamic_size,
tensor_array_name=tensor_array_name, name=scope)
self._flow = flow or constant_op.constant(0, dtype=_dtypes.float32)
@property
def flow(self):
"""The flow `Tensor` forcing ops leading to this TensorArray state."""
return self._flow
@property
def dtype(self):
"""The data type of this TensorArray."""
return self._dtype
@property
def handle(self):
"""The reference to the TensorArray."""
return self._handle
def grad(self, source, flow=None):
# tensor_array_grad requires a flow input when forward
# TensorArrays are dynamically sized. This forces the creation
# of the grad TensorArray only once the final forward array's size
# is fixed.
g_handle = gen_data_flow_ops._tensor_array_grad(
handle=self._handle, source=source, flow_in=flow or self.flow)
g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow or self.flow)
return g
def read(self, index, name=None):
"""Read the value at location `index` in the TensorArray."""
value = gen_data_flow_ops._tensor_array_read(
handle=self._handle, index=index, flow_in=self._flow, dtype=self._dtype,
name=name)
return value
def write(self, index, value, name=None):
"""Write `value` into index `index` of the TensorArray."""
flow_out = gen_data_flow_ops._tensor_array_write(
handle=self._handle, index=index, value=value, flow_in=self._flow,
name=name)
# Size below is ignored
ta = TensorArray(dtype=self._dtype, handle=self._handle)
ta._flow = flow_out
return ta
def pack(self, name=None):
"""Return the values in the TensorArray as a packed `Tensor`."""
value = gen_data_flow_ops._tensor_array_pack(
handle=self._handle, flow_in=self._flow, dtype=self._dtype,
name=name)
return value
def concat(self, name=None):
"""Return the values in the TensorArray as a concatenated `Tensor`."""
value, _ = gen_data_flow_ops._tensor_array_concat(
handle=self._handle, flow_in=self._flow, dtype=self._dtype,
name=name)
return value
def unpack(self, value, name=None):
"""Pack the values of a `Tensor` in the TensorArray."""
flow_out = gen_data_flow_ops._tensor_array_unpack(
handle=self._handle, value=value, flow_in=self._flow,
name=name)
ta = TensorArray(dtype=self._dtype, handle=self._handle)
ta._flow = flow_out
return ta
def split(self, value, lengths, name=None):
"""Split the values of a `Tensor` into the TensorArray."""
with ops.op_scope(
[self._handle, value, lengths], name, "TensorArraySplit"):
lengths = math_ops.to_int64(lengths)
flow_out = gen_data_flow_ops._tensor_array_split(
handle=self._handle, value=value, lengths=lengths, flow_in=self._flow,
name=name)
ta = TensorArray(dtype=self._dtype, handle=self._handle)
ta._flow = flow_out
return ta
def size(self, name=None):
"""Return the size of the TensorArray."""
return gen_data_flow_ops._tensor_array_size(
handle=self._handle, flow_in=self.flow, name=name)
def close(self, name=None):
"""Close the current TensorArray."""
return gen_data_flow_ops._tensor_array_close(
handle=self._handle, name=name)
@ops.RegisterShape("TensorArray")
def _TensorArrayShape(op):
# size is a scalar
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
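# the op's single output is the TensorArray handle, represented as a
# 2-element string vector, hence vector(2) below.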
return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayRead")
def _TensorArrayReadShape(op):
# handle, index, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
# value
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayWrite")
def _TensorArrayWriteShape(op):
# handle, index, value, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
# flow_out
return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArraySize")
def _TensorArraySizeShape(op):
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayClose")
def _TensorArrayCloseShape(op):
"""Shape function for ops that take a scalar and produce no outputs."""
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
return []
@ops.RegisterShape("TensorArrayGrad")
def _TensorArrayGradShape(op):
"""Shape function for ops that take a scalar and produce no outputs."""
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayPack")
def _TensorArrayPackShape(op):
# handle, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
# value
return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayConcat")
def _TensorArrayConcatShape(op):
# handle, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
# value, lengths
return [tensor_shape.unknown_shape(), tensor_shape.vector(None)]
@ops.RegisterShape("TensorArraySplit")
def _TensorArraySplitShape(op):
# handle, value, lengths, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[2].get_shape().merge_with(tensor_shape.vector(None))
op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
# flow_out
return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayUnpack")
def _TensorArrayUnpackShape(op):
# handle, value, flow_in
op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
# flow_out
return [tensor_shape.scalar()]
# pylint: enable=protected-access
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test neurom._neuritefunc functionality."""
from pathlib import Path
from math import pi, sqrt
from mock import patch, Mock
import numpy as np
from nose import tools as nt
from numpy.testing import assert_allclose
import scipy
import neurom as nm
from neurom.features import neuritefunc as _nf
from neurom.features import sectionfunc as sectionfunc
from neurom.geom import convex_hull
from neurom.features.tests.utils import _close
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
H5_PATH = DATA_PATH / 'h5/v1'
SWC_PATH = DATA_PATH / 'swc'
SIMPLE = nm.load_neuron(Path(SWC_PATH, 'simple.swc'))
NRN = nm.load_neuron(Path(H5_PATH, 'Neuron.h5'))
def test_principal_direction_extents():
principal_dir = list(_nf.principal_direction_extents(SIMPLE))
assert_allclose(principal_dir,
(14.736052694538641, 12.105102672688004))
# test with a realistic neuron
nrn = nm.load_neuron(Path(H5_PATH, 'bio_neuron-000.h5'))
p_ref = [1672.9694359427331, 142.43704397865031, 226.45895382204986,
415.50612748523838, 429.83008974193206, 165.95410536922873,
346.83281498399697]
p = _nf.principal_direction_extents(nrn)
_close(np.array(p), np.array(p_ref))
def test_n_bifurcation_points():
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites[0]), 1)
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites[1]), 1)
nt.assert_equal(_nf.n_bifurcation_points(SIMPLE.neurites), 2)
def test_n_forking_points():
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites[0]), 1)
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites[1]), 1)
nt.assert_equal(_nf.n_forking_points(SIMPLE.neurites), 2)
def test_n_leaves():
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites[0]), 2)
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites[1]), 2)
nt.assert_equal(_nf.n_leaves(SIMPLE.neurites), 4)
def test_total_area_per_neurite():
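# surface() below is the lateral area of a truncated cone (frustum) with end
# radii r0 and r1 and height h; sqrt((r0 - r1)**2 + h**2) is its slant length.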
def surface(r0, r1, h):
return pi * (r0 + r1) * sqrt((r0 - r1) ** 2 + h ** 2)
basal_area = surface(1, 1, 5) + surface(1, 0, 5) + surface(1, 0, 6)
ret = _nf.total_area_per_neurite(SIMPLE,
neurite_type=nm.BASAL_DENDRITE)
nt.assert_almost_equal(ret[0], basal_area)
axon_area = surface(1, 1, 4) + surface(1, 0, 5) + surface(1, 0, 6)
ret = _nf.total_area_per_neurite(SIMPLE, neurite_type=nm.AXON)
nt.assert_almost_equal(ret[0], axon_area)
ret = _nf.total_area_per_neurite(SIMPLE)
nt.ok_(np.allclose(ret, [basal_area, axon_area]))
def test_total_volume_per_neurite():
vol = _nf.total_volume_per_neurite(NRN)
nt.eq_(len(vol), 4)
# calculate the volumes by hand and compare
vol2 = [sum(sectionfunc.section_volume(s) for s in n.iter_sections())
for n in NRN.neurites
]
nt.eq_(vol, vol2)
# regression test
ref_vol = [271.94122143951864, 281.24754646913954,
274.98039928781355, 276.73860261723024]
nt.ok_(np.allclose(vol, ref_vol))
def test_neurite_volume_density():
vol = np.array(_nf.total_volume_per_neurite(NRN))
hull_vol = np.array([convex_hull(n).volume for n in nm.iter_neurites(NRN)])
vol_density = _nf.neurite_volume_density(NRN)
nt.eq_(len(vol_density), 4)
nt.ok_(np.allclose(vol_density, vol / hull_vol))
ref_density = [0.43756606998299519, 0.52464681266899216,
0.24068543213643726, 0.26289304906104355]
assert_allclose(vol_density, ref_density)
def test_neurite_volume_density_failed_convex_hull():
with patch('neurom.features.neuritefunc.convex_hull',
side_effect=scipy.spatial.qhull.QhullError('boom')):
vol_density = _nf.neurite_volume_density(NRN)
# a failed convex hull should produce nan densities rather than raising
nt.ok_(np.all(np.isnan(vol_density)))
def test_terminal_path_length_per_neurite():
terminal_distances = _nf.terminal_path_lengths_per_neurite(SIMPLE)
assert_allclose(terminal_distances,
(5 + 5., 5 + 6., 4. + 6., 4. + 5))
terminal_distances = _nf.terminal_path_lengths_per_neurite(SIMPLE,
neurite_type=nm.AXON)
assert_allclose(terminal_distances,
(4. + 6., 4. + 5.))
def test_total_length_per_neurite():
total_lengths = _nf.total_length_per_neurite(SIMPLE)
assert_allclose(total_lengths,
(5. + 5. + 6., 4. + 5. + 6.))
def test_n_segments():
n_segments = _nf.n_segments(SIMPLE)
nt.eq_(n_segments, 6)
def test_n_neurites():
n_neurites = _nf.n_neurites(SIMPLE)
nt.eq_(n_neurites, 2)
def test_n_sections():
n_sections = _nf.n_sections(SIMPLE)
nt.eq_(n_sections, 6)
def test_neurite_volumes():
# note: cannot use SIMPLE since it lies in a plane
total_volumes = _nf.total_volume_per_neurite(NRN)
assert_allclose(total_volumes,
[271.94122143951864, 281.24754646913954,
274.98039928781355, 276.73860261723024]
)
def test_section_path_lengths():
path_lengths = list(_nf.section_path_lengths(SIMPLE))
assert_allclose(path_lengths,
(5., 10., 11., # type 3, basal dendrite
4., 10., 9.)) # type 2, axon
def test_section_term_lengths():
term_lengths = list(_nf.section_term_lengths(SIMPLE))
assert_allclose(term_lengths,
(5., 6., 6., 5.))
def test_section_bif_lengths():
bif_lengths = list(_nf.section_bif_lengths(SIMPLE))
assert_allclose(bif_lengths,
(5., 4.))
def test_section_end_distances():
end_dist = list(_nf.section_end_distances(SIMPLE))
assert_allclose(end_dist,
[5.0, 5.0, 6.0, 4.0, 6.0, 5.0])
def test_section_partition_pairs():
part_pairs = list(_nf.partition_pairs(SIMPLE))
assert_allclose(part_pairs,
[(1.0, 1.0), (1.0, 1.0)])
def test_section_bif_radial_distances():
bif_rads = list(_nf.section_bif_radial_distances(SIMPLE))
assert_allclose(bif_rads,
[5., 4.])
bif_rads = list(_nf.section_bif_radial_distances(NRN, neurite_type=nm.AXON))
assert_allclose(bif_rads,
[8.842008561870646,
16.7440421479104,
23.070306480850533,
30.181121708042546,
36.62766031035137,
43.967487830324885,
51.91971040624528,
59.427722328770955,
66.25222507299583,
74.05119754074926])
def test_section_term_radial_distances():
trm_rads = list(_nf.section_term_radial_distances(SIMPLE))
assert_allclose(trm_rads,
[7.0710678118654755, 7.810249675906654, 7.211102550927978, 6.4031242374328485])
trm_rads = list(_nf.section_term_radial_distances(NRN, neurite_type=nm.APICAL_DENDRITE))
assert_allclose(trm_rads,
[16.22099879395879,
25.992977561564082,
33.31600613822663,
42.721314797308175,
52.379508081911546,
59.44327819128149,
67.07832724133213,
79.97743930553612,
87.10434825508366,
97.25246040544428,
99.58945832481642])
def test_number_of_sections_per_neurite():
sections = _nf.number_of_sections_per_neurite(SIMPLE)
assert_allclose(sections,
(3, 3))
def test_section_branch_orders():
branch_orders = list(_nf.section_branch_orders(SIMPLE))
assert_allclose(branch_orders,
(0, 1, 1, # type 3, basal dendrite
0, 1, 1)) # type 2, axon
def test_section_bif_branch_orders():
bif_branch_orders = list(_nf.section_bif_branch_orders(SIMPLE))
assert_allclose(bif_branch_orders,
(0, # type 3, basal dendrite
0)) # type 2, axon
def test_section_term_branch_orders():
term_branch_orders = list(_nf.section_term_branch_orders(SIMPLE))
assert_allclose(term_branch_orders,
(1, 1, # type 3, basal dendrite
1, 1)) # type 2, axon
def test_section_radial_distances():
radial_distances = _nf.section_radial_distances(SIMPLE)
assert_allclose(radial_distances,
(5.0, sqrt(5**2 + 5**2), sqrt(6**2 + 5**2), # type 3, basal dendrite
4.0, sqrt(6**2 + 4**2), sqrt(5**2 + 4**2))) # type 2, axon
def test_local_bifurcation_angles():
local_bif_angles = list(_nf.local_bifurcation_angles(SIMPLE))
assert_allclose(local_bif_angles,
(pi, pi))
def test_remote_bifurcation_angles():
remote_bif_angles = list(_nf.remote_bifurcation_angles(SIMPLE))
assert_allclose(remote_bif_angles,
(pi, pi))
def test_partition():
partition = list(_nf.bifurcation_partitions(SIMPLE))
assert_allclose(partition,
(1.0, 1.0))
def test_partition_asymmetry():
partition = list(_nf.partition_asymmetries(SIMPLE))
assert_allclose(partition,
(0.0, 0.0))
partition = list(_nf.partition_asymmetries(SIMPLE, variant='length'))
assert_allclose(partition,
(0.0625, 0.06666666666666667))
nt.assert_raises(ValueError, _nf.partition_asymmetries, SIMPLE, variant='unvalid-variant')
def test_segment_lengths():
segment_lengths = _nf.segment_lengths(SIMPLE)
assert_allclose(segment_lengths,
(5.0, 5.0, 6.0, # type 3, basal dendrite
4.0, 6.0, 5.0)) # type 2, axon
def test_segment_areas():
result = _nf.segment_areas(SIMPLE)
assert_allclose(result,
[31.415927,
16.019042,
19.109562,
25.132741,
19.109562,
16.019042])
def test_segment_volumes():
expected = [
15.70796327,
5.23598776,
6.28318531,
12.56637061,
6.28318531,
5.23598776,
]
result = _nf.segment_volumes(SIMPLE)
assert_allclose(result, expected)
def test_segment_midpoints():
midpoints = np.array(_nf.segment_midpoints(SIMPLE))
assert_allclose(midpoints,
np.array([[0., (5. + 0) / 2, 0.], # trunk type 2
[-2.5, 5., 0.],
[3., 5., 0.],
[0., (-4. + 0) / 2., 0.], # trunk type 3
[3., -4., 0.],
[-2.5, -4., 0.]]))
def test_segment_radial_distances():
"""midpoints on segments."""
radial_distances = _nf.segment_radial_distances(SIMPLE)
assert_allclose(radial_distances,
[2.5, sqrt(2.5**2 + 5**2), sqrt(3**2 + 5**2), 2.0, 5.0, sqrt(2.5**2 + 4**2)])
def test_segment_path_lengths():
pathlengths = _nf.segment_path_lengths(SIMPLE)
assert_allclose(pathlengths, [5., 10., 11., 4., 10., 9.])
pathlengths = _nf.segment_path_lengths(NRN)[:5]
assert_allclose(pathlengths, [0.1, 1.332525, 2.530149, 3.267878, 4.471462])
def test_section_taper_rates():
assert_allclose(list(_nf.section_taper_rates(NRN.neurites[0]))[:10],
[0.06776235492169848,
0.0588716599404923,
0.03791571485186163,
0.04674653812192691,
-0.026399800285566058,
-0.026547582897720887,
-0.045038414440432537,
0.02083822978267914,
-0.0027721371791201038,
0.0803069042861474],
atol=1e-4)
|
|
from cattle import ApiError
from common import * # NOQA
def test_agent_unique(super_client):
agents = super_client.list_agent(uri='sim://unique')
if len(agents) == 0:
agent = super_client.create_agent(uri='sim://unique')
agent = super_client.wait_success(agent)
assert agent.state == 'active'
agent.deactivate()
try:
super_client.create_agent(uri='sim://unique')
assert False
except ApiError as e:
assert e.error.code == 'NotUnique'
def test_pagination(context):
client = context.client
name = random_str()
containers = []
for i in range(4):
c = client.create_container(imageUuid=context.image_uuid, name=name)
containers.append(c)
for c in containers:
client.wait_success(c)
r = client.list_container(name=name)
assert len(r) == 4
try:
assert r.pagination.next is None
except AttributeError:
pass
collected = {}
r = client.list_container(name=name, limit=2)
assert len(r) == 2
assert r.pagination.next is not None
for i in r:
collected[i.id] = True
r = r.next()
assert len(r) == 2
try:
assert r.pagination.next is None
except AttributeError:
pass
for i in r:
collected[i.id] = True
assert len(collected) == 4
def test_pagination_include(super_client, new_context):
context = new_context
client = new_context.client
host = context.host
name = random_str()
container_ids = []
containers = []
for i in range(5):
c = client.create_container(imageUuid=context.image_uuid,
name=name,
requestedHostId=host.id)
c = super_client.reload(c)
containers.append(c)
container_ids.append(c.id)
for c in containers:
client.wait_success(c)
assert len(containers[0].instanceHostMaps()) == 1
assert host.id == containers[0].instanceHostMaps()[0].host().id
r = super_client.list_container(name=name)
assert len(r) == 5
for c in r:
assert len(c.instanceHostMaps()) == 1
assert c.instanceHostMaps()[0].hostId == host.id
collected = {}
r = super_client.list_container(name=name, include='instanceHostMaps',
limit=2)
assert len(r) == 2
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
r = r.next()
assert len(r) == 2
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
r = r.next()
assert len(r) == 1
for c in r:
collected[c.id] = True
assert len(c.instanceHostMaps) == 1
assert c.instanceHostMaps[0].hostId == host.id
assert not r.pagination.partial
maps = []
for id in container_ids:
maps.extend(super_client.list_instanceHostMap(hostId=host.id,
instanceId=id))
assert len(maps) == 5
maps_from_include = []
r = super_client.list_host(include='instanceHostMaps', limit=2,
accountId=host.accountId)
while True:
for h in r:
if h.id == host.id:
assert len(h.instanceHostMaps) <= 2
for m in h.instanceHostMaps:
if m.instanceId in container_ids and \
m.instanceId not in maps_from_include:
maps_from_include.append(m.instanceId)
for c in containers:
if c.id == m.instanceId:
client.wait_success(c.stop())
try:
r = r.next()
except AttributeError:
break
assert len(maps) == len(maps_from_include)
del maps_from_include[:]
r = super_client.list_host(include='instances', limit=2,
accountId=host.accountId)
while True:
for h in r:
if h.id == host.id:
for c in h.instances:
if c.id in container_ids and \
c.id not in maps_from_include:
maps_from_include.append(c.id)
client.wait_success(c.start())
try:
r = r.next()
except AttributeError:
break
assert len(maps) == len(maps_from_include)
def test_include_left_join(super_client, context):
container = context.create_container_no_success(startOnCreate=False)
container = context.wait_for_state(container, 'stopped')
c = super_client.by_id('container', container.id,
include='instanceHostMaps')
assert container.id == c.id
def test_include(super_client, context):
container = context.create_container(name='include_test')
container = super_client.reload(container)
for link_name in ['instanceHostMaps', 'instancehostmaps']:
found = False
for c in super_client.list_container(name_like='include_test%'):
if c.id == container.id:
found = True
assert len(c.instanceHostMaps()) == 1
assert callable(c.instanceHostMaps)
assert found
found = False
for c in super_client.list_container(include=link_name,
name_like='include_test%'):
if c.id == container.id:
found = True
assert len(c.instanceHostMaps) == 1
assert found
c = super_client.by_id('container', container.id)
assert callable(c.instanceHostMaps)
c = super_client.by_id('container', container.id, include=link_name)
assert len(c.instanceHostMaps) == 1
def test_limit(super_client):
result = super_client.list_container()
assert result.pagination.limit == 100
result = super_client.list_container(limit=105)
assert result.pagination.limit == 105
result = super_client.list_container(limit=10005)
assert result.pagination.limit == 3000
def test_schema_boolean_default(client):
con_schema = client.schema.types['container']
assert isinstance(con_schema.resourceFields.startOnCreate.default, bool)
def test_schema_self_link(client):
con_schema = client.schema.types['container']
assert con_schema.links.self is not None
assert con_schema.links.self.startswith("http")
def test_child_map_include(super_client, context):
container = context.create_container()
cs = super_client.list_container(uuid=container.uuid, include='hosts')
assert cs[0].hosts[0].uuid is not None
assert len(cs[0].hosts) == 1
hs = super_client.list_host(uuid=cs[0].hosts[0].uuid,
include='instances')
found = False
for i in hs[0].instances:
if i.uuid == cs[0].uuid:
found = True
assert found
def test_child_map(super_client, context):
container = context.create_container()
hosts = super_client.reload(container).hosts()
assert len(hosts) == 1
assert hosts[0].type == 'host'
def test_fields_on_include(super_client, context):
c = context.create_container()
host = super_client.by_id_host(context.host.id,
include='instances')
assert host is not None
found = False
for instance in host.instances:
if instance.id == c.id:
assert instance.transitioning == 'no'
assert 'stop' in instance
assert callable(instance.stop)
assert len(instance.links) > 1
found = True
break
assert found
def test_state_enum(super_client):
container_schema = super_client.schema.types['container']
states = set([
'creating',
'migrating',
'purged',
'purging',
'removed',
'removing',
'requested',
'restarting',
'restoring',
'running',
'starting',
'stopped',
'stopping',
'updating-running',
'updating-stopped',
'error',
'erroring'
])
assert container_schema.resourceFields['state'].type == 'enum'
assert states == set(container_schema.resourceFields['state'].options)
def test_actions_based_on_state(context):
c = context.create_container()
assert set(c.actions.keys()) == set(['migrate', 'restart', 'stop',
'update', 'execute', 'logs',
'proxy', 'setlabels'])
def test_include_user_not_auth_map(client):
client.list_host(include='instances')
def test_map_user_not_auth_map(context):
c = context.create_container()
assert len(c.hosts()) == 1
def test_role_option(admin_user_client, client, random_str, context):
c = admin_user_client.create_api_key(name=random_str,
accountId=context.account.id)
c = admin_user_client.wait_success(c)
assert c.state == 'active'
creds = admin_user_client.list_credential(name=random_str)
assert len(creds) == 1
creds = admin_user_client.list_credential(name=random_str,
_role='user')
assert len(creds) == 0
creds = client.list_credential(name=random_str, _role='superadmin')
assert len(creds) == 0
schemas = [x for x in admin_user_client.list_schema(_role='project')
if x.id == 'externalHandler']
assert len(schemas) == 0
def test_query_length(admin_user_client):
big = 'a' * 8192
admin_user_client.list_account(name=big)
bigger = 'a' * (16384 - 512)
admin_user_client.list_account(name=bigger)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
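# Illustrative sketch (not part of the original module; the variable name is
# hypothetical): `zero_initializer` is typically run once, before the first
# read of a large variable, instead of feeding a big zeros tensor.
def _example_zero_initializer_usage():
  big_var = variable_scope.get_variable(
      'example_big_var', shape=[1024, 1024], dtype=dtypes.float32)
  return zero_initializer(big_var)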
@deprecated(None, "Please switch to tf.train.assert_global_step")
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, "Please switch to tf.train.get_global_step")
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, "Please switch to tf.train.create_global_step")
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, "Please switch to tf.train.get_or_create_global_step")
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
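# Illustrative sketch (hypothetical name, not part of the original module):
# a typical use of `local_variable` is bookkeeping state that should live in
# `GraphKeys.LOCAL_VARIABLES` and stay out of checkpoints.
def _example_local_variable_usage():
  return local_variable(0, name='example_batches_seen')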
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None, use_resource=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
      If None, it defaults to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None
else [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = list(set(collections))
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(custom_getter,
reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource)
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None, use_resource=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter,
use_resource=use_resource)
return var
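# Illustrative sketch (hypothetical names, not part of the original module):
# `variable` creates or reuses a plain global variable, while `model_variable`
# additionally registers it in `GraphKeys.MODEL_VARIABLES`.
def _example_variable_usage():
  weights = model_variable('example_weights', shape=[10, 10], device='/cpu:0')
  counter = variable('example_counter', shape=[10], trainable=False)
  return weights, counter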
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
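# Illustrative sketch (hypothetical scope and suffix, not part of the original
# module): collected variables can be filtered by scope, by name suffix, or both.
def _example_get_variables_usage():
  conv_vars = get_variables(scope='example_conv1')
  bias_vars = get_variables(suffix='biases')
  return conv_vars, bias_vars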
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
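# Illustrative sketch (hypothetical scope, not part of the original module):
# when fine-tuning, a common pattern is to restore everything except the
# final classification layer.
def _example_get_variables_to_restore_usage():
  return get_variables_to_restore(exclude=['example_net/logits'])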
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
    a copied list of variables with the given suffix and scope.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
    raise ValueError("Couldn't find variable %s" % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
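# Illustrative sketch (hypothetical names, not part of the original module):
# build the assignment op once, then feed the concrete values when running it.
def _example_assign_from_values_usage(session, values_by_name):
  # values_by_name maps full variable names (e.g. 'net/weights') to numpy
  # arrays whose shapes match the corresponding variables.
  assign_op, feed_dict = assign_from_values(values_by_name)
  session.run(assign_op, feed_dict)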
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
      name in the checkpoint must be the full variable name, not the
      name of the partitioned variable, e.g. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
`None`, it would return `no_op(), None`.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
    reshape_variables: Boolean, if True it would automatically reshape variables
      which are of different shape than the ones stored in the checkpoint but
      which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation. If no matching variables were found in the checkpoint
then `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
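# Illustrative sketch (hypothetical checkpoint path and scope, not part of the
# original module): restore a subset of variables when initializing a
# fine-tuning run.
def _example_assign_from_checkpoint_fn_usage(session):
  init_fn = assign_from_checkpoint_fn(
      '/tmp/example_model.ckpt',
      get_variables_to_restore(exclude=['example_net/logits']),
      ignore_missing_vars=True)
  if init_fn is not None:
    init_fn(session)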
class VariableDeviceChooser(object):
"""Device chooser for variables.
  When using a parameter server, it assigns variables to parameter server
  tasks in a round-robin fashion. When not using a parameter server, it allows
  GPU or CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU").
      device_index: int. Optional device index. If left
        unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
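# Illustrative sketch (not part of the original module): with two parameter
# server tasks, variables created under this device function alternate between
# '/job:ps/task:0' and '/job:ps/task:1'.
def _example_variable_device_chooser_usage():
  chooser = VariableDeviceChooser(num_tasks=2)
  with ops.device(chooser):
    return variable('example_round_robin_var', shape=[10])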
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means all variables are selected according to the include rules.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means all variables are selected according to the exclude rules.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
|
#
# Copyright 2015 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import json
from unittest import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
from aodh import evaluator
from aodh.evaluator import event as event_evaluator
from aodh.storage import models
from aodh.tests import constants
from aodh.tests.unit.evaluator import base
class TestEventAlarmEvaluate(base.TestEvaluatorBase):
EVALUATOR = event_evaluator.EventAlarmEvaluator
@staticmethod
def _alarm(**kwargs):
alarm_id = kwargs.get('id') or uuidutils.generate_uuid()
return models.Alarm(name=kwargs.get('name', alarm_id),
type='event',
enabled=True,
alarm_id=alarm_id,
description='desc',
state=kwargs.get('state', 'insufficient data'),
state_reason='reason',
severity='critical',
state_timestamp=constants.MIN_DATETIME,
timestamp=constants.MIN_DATETIME,
ok_actions=[],
insufficient_data_actions=[],
alarm_actions=[],
repeat_actions=kwargs.get('repeat', False),
user_id='user',
project_id=kwargs.get('project', ''),
time_constraints=[],
rule=dict(event_type=kwargs.get('event_type', '*'),
query=kwargs.get('query', [])))
@staticmethod
def _event(**kwargs):
return {'message_id': kwargs.get('id') or uuidutils.generate_uuid(),
'event_type': kwargs.get('event_type', 'type0'),
'traits': kwargs.get('traits', [])}
def _setup_alarm_storage(self, alarms):
self._stored_alarms = {a.alarm_id: copy.deepcopy(a) for a in alarms}
self._update_history = []
def get_alarms(**kwargs):
return (a for a in self._stored_alarms.values())
def update_alarm(alarm):
self._stored_alarms[alarm.alarm_id] = copy.deepcopy(alarm)
self._update_history.append(dict(alarm_id=alarm.alarm_id,
state=alarm.state))
self.storage_conn.get_alarms.side_effect = get_alarms
self.storage_conn.update_alarm.side_effect = update_alarm
def _setup_alarm_notifier(self):
self._notification_history = []
def notify(alarm, previous, reason, data):
self._notification_history.append(dict(alarm_id=alarm.alarm_id,
state=alarm.state,
previous=previous,
reason=reason,
data=data))
self.notifier.notify.side_effect = notify
def _do_test_event_alarm(self, alarms, events,
expect_db_queries=None,
expect_alarm_states=None,
expect_alarm_updates=None,
expect_notifications=None):
self._setup_alarm_storage(alarms)
self._setup_alarm_notifier()
self.evaluator.evaluate_events(events)
if expect_db_queries is not None:
expected = [mock.call(enabled=True,
type='event',
project_id=p) for p in expect_db_queries]
self.assertEqual(expected,
self.storage_conn.get_alarms.call_args_list)
if expect_alarm_states is not None:
for alarm_id, state in expect_alarm_states.items():
self.assertEqual(state, self._stored_alarms[alarm_id].state)
if expect_alarm_updates is not None:
self.assertEqual(len(expect_alarm_updates),
len(self._update_history))
for alarm, h in zip(expect_alarm_updates, self._update_history):
expected = dict(alarm_id=alarm.alarm_id,
state=evaluator.ALARM)
self.assertEqual(expected, h)
if expect_notifications is not None:
self.assertEqual(len(expect_notifications),
len(self._notification_history))
for n, h in zip(expect_notifications, self._notification_history):
alarm = n['alarm']
event = n['event']
previous = n.get('previous', evaluator.UNKNOWN)
reason = ('Event <id=%(e)s,event_type=%(type)s> hits the '
'query <query=%(query)s>.') % {
'e': event['message_id'],
'type': event['event_type'],
'query': json.dumps(alarm.rule['query'], sort_keys=True)}
data = {'type': 'event', 'event': event}
expected = dict(alarm_id=alarm.alarm_id,
state=evaluator.ALARM,
previous=previous,
reason=reason,
data=data)
self.assertEqual(expected, h)
def test_fire_alarm_in_the_same_project_id(self):
alarm = self._alarm(project='project1')
event = self._event(traits=[['project_id', 1, 'project1']])
self._do_test_event_alarm(
[alarm], [event],
expect_db_queries=['project1'],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_fire_alarm_in_the_same_tenant_id(self):
alarm = self._alarm(project='project1')
event = self._event(traits=[['tenant_id', 1, 'project1']])
self._do_test_event_alarm(
[alarm], [event],
expect_db_queries=['project1'],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_fire_alarm_in_project_none(self):
alarm = self._alarm(project='')
event = self._event()
self._do_test_event_alarm(
[alarm], [event],
expect_db_queries=[''],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_continue_following_evaluation_after_exception(self):
alarms = [
self._alarm(id=1),
self._alarm(id=2),
]
event = self._event()
original = self.evaluator._fire_alarm
with mock.patch.object(event_evaluator.EventAlarmEvaluator,
'_fire_alarm') as _fire_alarm:
def _side_effect(*args, **kwargs):
_fire_alarm.side_effect = original
return Exception('boom')
_fire_alarm.side_effect = _side_effect
self._do_test_event_alarm(
alarms, [event],
expect_alarm_states={alarms[0].alarm_id: evaluator.UNKNOWN,
alarms[1].alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarms[1]],
expect_notifications=[dict(alarm=alarms[1], event=event)])
def test_skip_event_missing_event_type(self):
alarm = self._alarm()
event = {'message_id': uuidutils.generate_uuid(), 'traits': []}
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_skip_event_missing_message_id(self):
alarm = self._alarm()
event = {'event_type': 'type1', 'traits': []}
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_continue_alarming_when_repeat_actions_enabled(self):
alarm = self._alarm(repeat=True, state=evaluator.ALARM)
event = self._event()
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event,
previous=evaluator.ALARM)])
def test_do_not_continue_alarming_when_repeat_actions_disabled(self):
alarm = self._alarm(repeat=False, state=evaluator.ALARM)
event = self._event()
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[],
expect_notifications=[])
def test_skip_uninterested_event_type(self):
alarm = self._alarm(event_type='compute.instance.exists')
event = self._event(event_type='compute.instance.update')
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_fire_alarm_event_type_pattern_matched(self):
alarm = self._alarm(event_type='compute.instance.*')
event = self._event(event_type='compute.instance.update')
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_skip_event_type_pattern_unmatched(self):
alarm = self._alarm(event_type='compute.instance.*')
event = self._event(event_type='dummy.compute.instance')
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_fire_alarm_query_matched_string(self):
alarm = self._alarm(query=[dict(field="traits.state",
value="stopped",
op="eq")])
event = self._event(traits=[['state', 1, 'stopped']])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_skip_query_unmatched_string(self):
alarm = self._alarm(query=[dict(field="traits.state",
value="stopped",
op="eq")])
event = self._event(traits=[['state', 1, 'active']])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_fire_alarm_query_matched_integer(self):
alarm = self._alarm(query=[dict(field="traits.instance_type_id",
type="integer",
value="5",
op="eq")])
event = self._event(traits=[['instance_type_id', 2, 5]])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_skip_query_unmatched_integer(self):
alarm = self._alarm(query=[dict(field="traits.instance_type_id",
type="integer",
value="5",
op="eq")])
event = self._event(traits=[['instance_type_id', 2, 6]])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_fire_alarm_query_matched_float(self):
alarm = self._alarm(query=[dict(field="traits.io_read_kbs",
type="float",
value="123.456",
op="eq")])
event = self._event(traits=[['io_read_kbs', 3, 123.456]])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_skip_query_unmatched_float(self):
alarm = self._alarm(query=[dict(field="traits.io_read_kbs",
type="float",
value="123.456",
op="eq")])
event = self._event(traits=[['io_read_kbs', 3, 456.123]])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_fire_alarm_query_matched_datetime(self):
alarm = self._alarm(query=[dict(field="traits.created_at",
type="datetime",
value="2015-09-01T18:52:27.214309",
op="eq")])
event = self._event(traits=[['created_at', 4,
'2015-09-01T18:52:27.214309']])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=event)])
def test_skip_query_unmatched_datetime(self):
alarm = self._alarm(query=[dict(field="traits.created_at",
type="datetime",
value="2015-09-01T18:52:27.214309",
op="eq")])
event = self._event(traits=[['created_at', 4,
'2015-09-02T18:52:27.214309']])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_skip_alarm_due_to_uncompareable_trait(self):
alarm = self._alarm(query=[dict(field="traits.created_at",
type="datetime",
value="2015-09-01T18:52:27.214309",
op="eq")])
event = self._event(traits=[['created_at', 3, 123.456]])
self._do_test_event_alarm(
[alarm], [event],
expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN},
expect_alarm_updates=[],
expect_notifications=[])
def test_event_alarm_cache_hit(self):
alarm = self._alarm(project='project2', event_type='none')
events = [
self._event(traits=[['project_id', 1, 'project2']]),
self._event(traits=[['project_id', 1, 'project2']]),
]
self._do_test_event_alarm([alarm], events,
expect_db_queries=['project2'])
def test_event_alarm_cache_updated_after_fired(self):
alarm = self._alarm(project='project2', event_type='type1',
repeat=False)
events = [
self._event(event_type='type1',
traits=[['project_id', 1, 'project2']]),
self._event(event_type='type1',
traits=[['project_id', 1, 'project2']]),
]
self._do_test_event_alarm(
[alarm], events,
expect_db_queries=['project2'],
expect_alarm_states={alarm.alarm_id: evaluator.ALARM},
expect_alarm_updates=[alarm],
expect_notifications=[dict(alarm=alarm, event=events[0])])
def test_event_alarm_caching_disabled(self):
alarm = self._alarm(project='project2', event_type='none')
events = [
self._event(traits=[['project_id', 1, 'project2']]),
self._event(traits=[['project_id', 1, 'project2']]),
]
self.evaluator.conf.event_alarm_cache_ttl = 0
self._do_test_event_alarm([alarm], events,
expect_db_queries=['project2', 'project2'])
@mock.patch.object(timeutils, 'utcnow')
def test_event_alarm_cache_expired(self, mock_utcnow):
alarm = self._alarm(project='project2', event_type='none')
events = [
self._event(traits=[['project_id', 1, 'project2']]),
self._event(traits=[['project_id', 1, 'project2']]),
]
mock_utcnow.side_effect = [
datetime.datetime(2015, 1, 1, 0, 0, 0),
datetime.datetime(2015, 1, 1, 1, 0, 0),
datetime.datetime(2015, 1, 1, 1, 1, 0),
]
self._do_test_event_alarm([alarm], events,
expect_db_queries=['project2', 'project2'])
def test_event_alarm_cache_miss(self):
events = [
self._event(traits=[['project_id', 1, 'project2']]),
self._event(traits=[['project_id', 1, 'project3']]),
]
self._do_test_event_alarm([], events,
expect_db_queries=['project2', 'project3'])
|
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
if isinstance(failure, (test_failures.FailureMissingResult,
test_failures.FailureTextMismatch,
test_failures.FailureTestHarnessAssertion)):
writer.write_text_files(driver_output.text, expected_driver_output.text)
writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
elif isinstance(failure, test_failures.FailureMissingImage):
writer.write_image_files(driver_output.image, expected_image=None)
elif isinstance(failure, test_failures.FailureMissingImageHash):
writer.write_image_files(driver_output.image, expected_driver_output.image)
elif isinstance(failure, test_failures.FailureImageHashMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
writer.write_image_diff_files(driver_output.image_diff)
elif isinstance(failure, (test_failures.FailureAudioMismatch,
test_failures.FailureMissingAudio)):
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureLeak):
writer.write_leak_log(driver_output.leak_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
diff_image, err_str = port.diff_image(expected_driver_output.image, driver_output.image)
if diff_image:
writer.write_image_diff_files(diff_image)
else:
_log.warn('ref test mismatch did not produce an image diff.')
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
else:
assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
if expected_driver_output is not None:
writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
class TestResultWriter(object):
"""A class which handles all writing operations to the result directory."""
# Filename pieces when writing failures to the test results directory.
FILENAME_SUFFIX_ACTUAL = "-actual"
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
FILENAME_SUFFIX_OVERLAY = "-overlay.html"
def __init__(self, filesystem, port, root_output_dir, test_name):
self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
def output_filename(self, modifier):
"""Returns a filename inside the output dir that contains modifier.
For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
Args:
modifier: a string to replace the extension of filename with
Return:
The absolute path to the output filename
"""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _write_file(self, path, contents):
if contents is not None:
self._make_output_directory()
self._filesystem.write_binary_file(path, contents)
def _output_testname(self, modifier):
fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
"""Writes the test output, the expected output in the results directory.
The full output filename of the actual, for example, will be
<filename>-actual<file_type>
For instance,
my_test-actual.txt
Args:
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
"""
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
self._write_file(actual_filename, output)
self._write_file(expected_filename, expected)
def write_stderr(self, error):
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
self._write_file(filename, error)
def write_crash_log(self, crash_log):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_file(filename, crash_log.encode('utf8', 'replace'))
def write_leak_log(self, leak_log):
filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
self._write_file(filename, leak_log)
def copy_sample_file(self, sample_file):
filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
self._filesystem.copyfile(sample_file, filename)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
def create_text_diff_and_write_result(self, actual_text, expected_text):
# FIXME: This function is actually doing the diffs as well as writing results.
# It might be better to extract code which does 'diff' and make it a separate function.
if not actual_text or not expected_text:
return
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
self._write_file(diff_filename, diff)
# Shell out to wdiff to get colored inline diffs.
if self._port.wdiff_available():
wdiff = self._port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
self._write_file(wdiff_filename, wdiff)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
if self._port.pretty_patch_available():
pretty_patch = self._port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
self._write_file(pretty_patch_filename, pretty_patch)
def create_repaint_overlay_result(self, actual_text, expected_text):
html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
if html:
overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
self._write_file(overlay_filename, html)
def write_audio_files(self, actual_audio, expected_audio):
self.write_output_files('.wav', actual_audio, expected_audio)
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
self._write_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
# FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
# FIXME: old-run-webkit-tests include a link to the test file.
html = """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>.label{font-weight:bold}</style>
</head>
<body>
Difference between images: <a href="%(diff_filename)s">diff</a><br>
<div class=imageText></div>
<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
<script>
(function() {
var preloadedImageCount = 0;
function preloadComplete() {
++preloadedImageCount;
if (preloadedImageCount < 2)
return;
toggleImages();
setInterval(toggleImages, 2000)
}
function preloadImage(url) {
image = new Image();
image.addEventListener('load', preloadComplete);
image.src = url;
return image;
}
function toggleImages() {
if (text.textContent == 'Expected Image') {
text.textContent = 'Actual Image';
container.replaceChild(actualImage, container.firstChild);
} else {
text.textContent = 'Expected Image';
container.replaceChild(expectedImage, container.firstChild);
}
}
var text = document.querySelector('.imageText');
var container = document.querySelector('.imageContainer');
var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
})();
</script>
</body>
</html>
""" % {
'title': self._test_name,
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
self._write_file(diffs_html_filename, html)
def write_reftest(self, src_filepath):
fs = self._filesystem
dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
|
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from modular_build import read_file, write_file
import os
import os.path as path
import generate_injected_script_externs
import generate_protocol_externs
import modular_build
import re
import shutil
import subprocess
import sys
import tempfile
try:
import simplejson as json
except ImportError:
import json
if len(sys.argv) == 2 and sys.argv[1] == '--help':
print("Usage: %s [module_names]" % path.basename(sys.argv[0]))
print(" module_names list of modules for which the Closure compilation should run.")
print(" If absent, the entire frontend will be compiled.")
sys.exit(0)
is_cygwin = sys.platform == 'cygwin'
def run_in_shell(command_line):
return subprocess.Popen(command_line, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
def to_platform_path(filepath):
if not is_cygwin:
return filepath
return re.sub(r'^/cygdrive/(\w)', '\\1:', filepath)
def to_platform_path_exact(filepath):
if not is_cygwin:
return filepath
output, _ = run_in_shell('cygpath -w %s' % filepath).communicate()
# pylint: disable=E1103
return output.strip().replace('\\', '\\\\')
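# Example (illustrative paths): on Cygwin, to_platform_path('/cygdrive/c/src/a.js')
# returns 'c:/src/a.js', while to_platform_path_exact() shells out to `cygpath -w`
# and doubles the backslashes so the result can be embedded in a generated command
# line. On all other platforms both helpers return the path unchanged.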
scripts_path = path.dirname(path.abspath(__file__))
devtools_path = path.dirname(scripts_path)
inspector_path = path.join(path.dirname(devtools_path), 'core', 'inspector')
devtools_frontend_path = path.join(devtools_path, 'front_end')
patched_es6_externs_file = to_platform_path(path.join(devtools_frontend_path, 'es6.js'))
global_externs_file = to_platform_path(path.join(devtools_frontend_path, 'externs.js'))
protocol_externs_file = path.join(devtools_frontend_path, 'protocol_externs.js')
webgl_rendering_context_idl_path = path.join(path.dirname(devtools_path), 'core', 'html', 'canvas', 'WebGLRenderingContextBase.idl')
injected_script_source_name = path.join(inspector_path, 'InjectedScriptSource.js')
canvas_injected_script_source_name = path.join(inspector_path, 'InjectedScriptCanvasModuleSource.js')
injected_script_externs_idl_names = [
path.join(inspector_path, 'InjectedScriptHost.idl'),
path.join(inspector_path, 'JavaScriptCallFrame.idl'),
]
jsmodule_name_prefix = 'jsmodule_'
runtime_module_name = '_runtime'
type_checked_jsdoc_tags_list = ['param', 'return', 'type', 'enum']
type_checked_jsdoc_tags_or = '|'.join(type_checked_jsdoc_tags_list)
# Basic regex for invalid JsDoc types: an object type name ([A-Z][A-Za-z0-9.]+[A-Za-z0-9]) not preceded by '!', '?', ':' (this, new), or '.' (object property).
invalid_type_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*\{.*(?<![!?:.A-Za-z0-9])([A-Z][A-Za-z0-9.]+[A-Za-z0-9])[^/]*\}')
invalid_type_designator_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*.*(?<![{: ])([?!])=?\}')
invalid_non_object_type_regex = re.compile(r'@(?:' + type_checked_jsdoc_tags_or + r')\s*\{.*(![a-z]+)[^/]*\}')
error_warning_regex = re.compile(r'WARNING|ERROR')
loaded_css_regex = re.compile(r'(?:registerRequiredCSS|WebInspector\.View\.createStyleElement)\s*\(\s*"(.+)"\s*\)')
java_build_regex = re.compile(r'^\w+ version "(\d+)\.(\d+)')
errors_found = False
generate_protocol_externs.generate_protocol_externs(protocol_externs_file, path.join(devtools_path, 'protocol.json'))
# Based on http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python.
def which(program):
def is_exe(fpath):
return path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = path.split(program)
if fpath:
if is_exe(program):
return program
else:
for part in os.environ["PATH"].split(os.pathsep):
part = part.strip('"')
exe_file = path.join(part, program)
if is_exe(exe_file):
return exe_file
return None
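# Example (illustrative): which('java') returns something like '/usr/bin/java'
# when an executable named "java" is found on $PATH, and None otherwise.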
def log_error(message):
print 'ERROR: ' + message
def error_excepthook(exctype, value, traceback):
print 'ERROR:'
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = error_excepthook
application_descriptors = ['devtools.json', 'inspector.json', 'toolbox.json']
loader = modular_build.DescriptorLoader(devtools_frontend_path)
descriptors = loader.load_applications(application_descriptors)
modules_by_name = descriptors.modules
def hasErrors(output):
return re.search(error_warning_regex, output) != None
def verify_jsdoc_extra(additional_files):
files = [to_platform_path(file) for file in descriptors.all_compiled_files() + additional_files]
file_list = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
file_list.write('\n'.join(files))
finally:
file_list.close()
return run_in_shell('%s -jar %s --files-list-name %s' % (java_exec, jsdoc_validator_jar, to_platform_path_exact(file_list.name))), file_list
def verify_jsdoc(additional_files):
def file_list():
return descriptors.all_compiled_files() + additional_files
errors_found = False
for full_file_name in file_list():
lineIndex = 0
with open(full_file_name, 'r') as sourceFile:
for line in sourceFile:
line = line.rstrip()
lineIndex += 1
if not line:
continue
if verify_jsdoc_line(full_file_name, lineIndex, line):
errors_found = True
return errors_found
def verify_jsdoc_line(fileName, lineIndex, line):
def print_error(message, errorPosition):
print '%s:%s: ERROR - %s%s%s%s%s%s' % (fileName, lineIndex, message, os.linesep, line, os.linesep, ' ' * errorPosition + '^', os.linesep)
known_css = {}
errors_found = False
match = re.search(invalid_type_regex, line)
if match:
print_error('Type "%s" nullability not marked explicitly with "?" (nullable) or "!" (non-nullable)' % match.group(1), match.start(1))
errors_found = True
match = re.search(invalid_non_object_type_regex, line)
if match:
print_error('Non-object type explicitly marked with "!" (non-nullable), which is the default and should be omitted', match.start(1))
errors_found = True
match = re.search(invalid_type_designator_regex, line)
if match:
print_error('Type nullability indicator misplaced, should precede type', match.start(1))
errors_found = True
match = re.search(loaded_css_regex, line)
if match:
file = path.join(devtools_frontend_path, match.group(1))
exists = known_css.get(file)
if exists is None:
exists = path.isfile(file)
known_css[file] = exists
if not exists:
print_error('Dynamically loaded CSS stylesheet is missing in the source tree', match.start(1))
errors_found = True
return errors_found
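# Illustrative examples of lines the checks above would flag:
#   @param {Element} node          - nullability of "Element" not marked with '!' or '?'
#   @return {!number}              - non-object type needlessly marked with '!'
#   @type {string!}                - nullability indicator placed after the type
#   registerRequiredCSS("foo.css") - flagged when front_end/foo.css does not exist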
def find_java():
required_major = 1
required_minor = 7
exec_command = None
has_server_jvm = True
java_path = which('java')
if not java_path:
java_path = which('java.exe')
if not java_path:
print 'NOTE: No Java executable found in $PATH.'
sys.exit(1)
is_ok = False
java_version_out, _ = run_in_shell('%s -version' % java_path).communicate()
# pylint: disable=E1103
match = re.search(java_build_regex, java_version_out)
if match:
major = int(match.group(1))
minor = int(match.group(2))
is_ok = major >= required_major and minor >= required_minor
if is_ok:
exec_command = '%s -Xms1024m -server -XX:+TieredCompilation' % java_path
check_server_proc = run_in_shell('%s -version' % exec_command)
check_server_proc.communicate()
if check_server_proc.returncode != 0:
# Not all Java installs have server JVMs.
exec_command = exec_command.replace('-server ', '')
has_server_jvm = False
if not is_ok:
print 'NOTE: Java executable version %d.%d or above not found in $PATH.' % (required_major, required_minor)
sys.exit(1)
print 'Java executable: %s%s' % (java_path, '' if has_server_jvm else ' (no server JVM)')
return exec_command
java_exec = find_java()
closure_compiler_jar = to_platform_path(path.join(scripts_path, 'closure', 'compiler.jar'))
closure_runner_jar = to_platform_path(path.join(scripts_path, 'compiler-runner', 'closure-runner.jar'))
jsdoc_validator_jar = to_platform_path(path.join(scripts_path, 'jsdoc-validator', 'jsdoc-validator.jar'))
modules_dir = tempfile.mkdtemp()
common_closure_args = ' --summary_detail_level 3 --jscomp_error visibility --compilation_level SIMPLE_OPTIMIZATIONS --warning_level VERBOSE --language_in=ES6_STRICT --language_out=ES5_STRICT --accept_const_keyword --extra_annotation_name suppressReceiverCheck --extra_annotation_name suppressGlobalPropertiesCheck --module_output_path_prefix %s' % to_platform_path_exact(modules_dir + path.sep)
worker_modules_by_name = {}
dependents_by_module_name = {}
for module_name in descriptors.application:
module = descriptors.modules[module_name]
if descriptors.application[module_name].get('type', None) == 'worker':
worker_modules_by_name[module_name] = module
for dep in module.get('dependencies', []):
list = dependents_by_module_name.get(dep)
if not list:
list = []
dependents_by_module_name[dep] = list
list.append(module_name)
def check_conditional_dependencies():
    global errors_found
for name in modules_by_name:
for dep_name in modules_by_name[name].get('dependencies', []):
dependency = modules_by_name[dep_name]
if dependency.get('experiment') or dependency.get('condition'):
log_error('Module "%s" may not depend on the conditional module "%s"' % (name, dep_name))
errors_found = True
check_conditional_dependencies()
def verify_worker_modules():
    global errors_found
for name in modules_by_name:
for dependency in modules_by_name[name].get('dependencies', []):
if dependency in worker_modules_by_name:
log_error('Module "%s" may not depend on the worker module "%s"' % (name, dependency))
errors_found = True
verify_worker_modules()
def check_duplicate_files():
def check_module(module, seen_files, seen_modules):
name = module['name']
seen_modules[name] = True
for dep_name in module.get('dependencies', []):
if not dep_name in seen_modules:
check_module(modules_by_name[dep_name], seen_files, seen_modules)
for source in module.get('scripts', []):
referencing_module = seen_files.get(source)
if referencing_module:
log_error('Duplicate use of %s in "%s" (previously seen in "%s")' % (source, name, referencing_module))
seen_files[source] = name
for module_name in worker_modules_by_name:
check_module(worker_modules_by_name[module_name], {}, {})
print 'Checking duplicate files across modules...'
check_duplicate_files()
def module_arg(module_name):
return ' --module ' + jsmodule_name_prefix + module_name
def modules_to_check():
if len(sys.argv) == 1:
return descriptors.sorted_modules()
print 'Compiling only these modules: %s' % sys.argv[1:]
return [module for module in descriptors.sorted_modules() if module in set(sys.argv[1:])]
def dump_module(name, recursively, processed_modules):
if name in processed_modules:
return ''
processed_modules[name] = True
module = modules_by_name[name]
skipped_scripts = set(module.get('skip_compilation', []))
command = ''
dependencies = module.get('dependencies', [])
if recursively:
for dependency in dependencies:
command += dump_module(dependency, recursively, processed_modules)
command += module_arg(name) + ':'
filtered_scripts = descriptors.module_compiled_files(name)
command += str(len(filtered_scripts))
firstDependency = True
for dependency in dependencies + [runtime_module_name]:
if firstDependency:
command += ':'
else:
command += ','
firstDependency = False
command += jsmodule_name_prefix + dependency
for script in filtered_scripts:
command += ' --js ' + to_platform_path(path.join(devtools_frontend_path, name, script))
return command
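# Example (illustrative module name and files): for a module "common" with two
# compiled scripts and no dependencies, dump_module('common', True, {}) produces
# a fragment such as
#   --module jsmodule_common:2:jsmodule__runtime --js <front_end>/common/a.js --js <front_end>/common/b.js
# since every module implicitly depends on the synthetic '_runtime' module.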
print 'Compiling frontend...'
compiler_args_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
platform_protocol_externs_file = to_platform_path(protocol_externs_file)
runtime_js_path = to_platform_path(path.join(devtools_frontend_path, 'Runtime.js'))
checked_modules = modules_to_check()
for name in checked_modules:
closure_args = common_closure_args
closure_args += ' --externs ' + to_platform_path(patched_es6_externs_file)
closure_args += ' --externs ' + to_platform_path(global_externs_file)
closure_args += ' --externs ' + platform_protocol_externs_file
runtime_module = module_arg(runtime_module_name) + ':1 --js ' + runtime_js_path
closure_args += runtime_module + dump_module(name, True, {})
compiler_args_file.write('%s %s%s' % (name, closure_args, os.linesep))
finally:
compiler_args_file.close()
closure_runner_command = '%s -jar %s --compiler-args-file %s' % (java_exec, closure_runner_jar, to_platform_path_exact(compiler_args_file.name))
modular_compiler_proc = run_in_shell(closure_runner_command)
def unclosure_injected_script(sourceFileName, outFileName):
source = read_file(sourceFileName)
def replace_function(matchobj):
return re.sub(r'@param', 'param', matchobj.group(1) or '') + '\n//' + matchobj.group(2)
# Comment out the closure function and its jsdocs
source = re.sub(r'(/\*\*(?:[\s\n]*\*\s*@param[^\n]+\n)+\s*\*/\s*)?\n(\(function)', replace_function, source, count=1)
# Comment out its return statement
source = re.sub(r'\n(\s*return\s+[^;]+;\s*\n\}\)\s*)$', '\n/*\\1*/', source)
# Replace the "var Object" override with a "self.Object" one
source = re.sub(r'\nvar Object =', '\nself.Object =', source, count=1)
write_file(outFileName, source)
injectedScriptSourceTmpFile = to_platform_path(path.join(inspector_path, 'InjectedScriptSourceTmp.js'))
injectedScriptCanvasModuleSourceTmpFile = path.join(inspector_path, 'InjectedScriptCanvasModuleSourceTmp.js')
unclosure_injected_script(injected_script_source_name, injectedScriptSourceTmpFile)
unclosure_injected_script(canvas_injected_script_source_name, injectedScriptCanvasModuleSourceTmpFile)
print 'Compiling InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js...'
injected_script_externs_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
try:
generate_injected_script_externs.generate_injected_script_externs(injected_script_externs_idl_names, injected_script_externs_file)
finally:
injected_script_externs_file.close()
spawned_compiler_command = '%s -jar %s %s' % (java_exec, closure_compiler_jar, common_closure_args)
command = spawned_compiler_command
command += ' --externs ' + to_platform_path_exact(injected_script_externs_file.name)
command += ' --externs ' + to_platform_path(protocol_externs_file)
command += ' --module ' + jsmodule_name_prefix + 'injected_script' + ':1'
command += ' --js ' + to_platform_path(injectedScriptSourceTmpFile)
command += ' --module ' + jsmodule_name_prefix + 'injected_canvas_script' + ':1:' + jsmodule_name_prefix + 'injected_script'
command += ' --js ' + to_platform_path(injectedScriptCanvasModuleSourceTmpFile)
injectedScriptCompileProc = run_in_shell(command)
print 'Verifying JSDoc comments...'
additional_jsdoc_check_files = [injectedScriptSourceTmpFile, injectedScriptCanvasModuleSourceTmpFile]
errors_found |= verify_jsdoc(additional_jsdoc_check_files)
jsdocValidatorProc, jsdocValidatorFileList = verify_jsdoc_extra(additional_jsdoc_check_files)
print 'Checking generated code in InjectedScriptCanvasModuleSource.js...'
webgl_check_script_path = path.join(scripts_path, "check_injected_webgl_calls_info.py")
check_injected_webgl_calls_command = '%s %s %s' % (webgl_check_script_path, webgl_rendering_context_idl_path, canvas_injected_script_source_name)
canvasModuleCompileProc = run_in_shell(check_injected_webgl_calls_command)
print 'Validating InjectedScriptSource.js...'
injectedscript_check_script_path = path.join(scripts_path, "check_injected_script_source.py")
check_injected_script_command = '%s %s' % (injectedscript_check_script_path, injected_script_source_name)
validateInjectedScriptProc = run_in_shell(check_injected_script_command)
print
(jsdocValidatorOut, _) = jsdocValidatorProc.communicate()
if jsdocValidatorOut:
print ('JSDoc validator output:%s%s' % (os.linesep, jsdocValidatorOut))
errors_found = True
os.remove(jsdocValidatorFileList.name)
(moduleCompileOut, _) = modular_compiler_proc.communicate()
print 'Modular compilation output:'
start_module_regex = re.compile(r'^@@ START_MODULE:(.+) @@$')
end_module_regex = re.compile(r'^@@ END_MODULE @@$')
in_module = False
skipped_modules = {}
error_count = 0
def skip_dependents(module_name):
for skipped_module in dependents_by_module_name.get(module_name, []):
skipped_modules[skipped_module] = True
has_module_output = False
# pylint: disable=E1103
for line in moduleCompileOut.splitlines():
if not in_module:
match = re.search(start_module_regex, line)
if not match:
continue
in_module = True
has_module_output = True
module_error_count = 0
module_output = []
module_name = match.group(1)
skip_module = skipped_modules.get(module_name)
if skip_module:
skip_dependents(module_name)
else:
match = re.search(end_module_regex, line)
if not match:
if not skip_module:
module_output.append(line)
if hasErrors(line):
error_count += 1
module_error_count += 1
skip_dependents(module_name)
continue
in_module = False
if skip_module:
print 'Skipping module %s...' % module_name
elif not module_error_count:
print 'Module %s compiled successfully: %s' % (module_name, module_output[0])
else:
print 'Module %s compile failed: %s errors%s' % (module_name, module_error_count, os.linesep)
print os.linesep.join(module_output)
if not has_module_output:
print moduleCompileOut
if error_count:
print 'Total Closure errors: %d%s' % (error_count, os.linesep)
errors_found = True
(injectedScriptCompileOut, _) = injectedScriptCompileProc.communicate()
print 'InjectedScriptSource.js and InjectedScriptCanvasModuleSource.js compilation output:%s' % os.linesep, injectedScriptCompileOut
errors_found |= hasErrors(injectedScriptCompileOut)
(canvasModuleCompileOut, _) = canvasModuleCompileProc.communicate()
print 'InjectedScriptCanvasModuleSource.js generated code check output:%s' % os.linesep, canvasModuleCompileOut
errors_found |= hasErrors(canvasModuleCompileOut)
(validateInjectedScriptOut, _) = validateInjectedScriptProc.communicate()
print 'Validate InjectedScriptSource.js output:%s' % os.linesep, (validateInjectedScriptOut if validateInjectedScriptOut else '<empty>')
errors_found |= hasErrors(validateInjectedScriptOut)
if errors_found:
print 'ERRORS DETECTED'
os.remove(injectedScriptSourceTmpFile)
os.remove(injectedScriptCanvasModuleSourceTmpFile)
os.remove(compiler_args_file.name)
os.remove(injected_script_externs_file.name)
os.remove(protocol_externs_file)
shutil.rmtree(modules_dir, True)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluate Object Detection result on a single image.
Annotates each detected result as a true positive or false positive according to
a predefined IOU threshold. Non Maximum Suppression is used by default. Multi-class
detection is supported by default.
"""
import numpy as np
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class PerImageEvaluation(object):
"""Evaluate detection result of a single image."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=0.3,
nms_max_output_boxes=50):
"""Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
matching_iou_threshold: A ratio of area intersection to union, which is
the threshold to consider whether a detection is true positive or not
nms_iou_threshold: IOU threshold used in Non Maximum Suppression.
nms_max_output_boxes: Number of maximum output boxes in NMS.
"""
self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
def compute_object_detection_metrics(self, detected_boxes, detected_scores,
detected_class_labels, groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_lists):
"""Compute Object Detection related metrics from a single image.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1], representing
          the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_lists: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
Returns:
scores: A list of C float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
tp_fp_labels: A list of C boolean numpy arrays. Each numpy array
is of shape [K, 1], representing K True/False positive label of
object instances detected with class label c
      is_class_correctly_detected_in_image: a numpy integer array of
          shape [C, 1], indicating whether the corresponding class has at least
          one instance correctly detected in the image
"""
detected_boxes, detected_scores, detected_class_labels = (
self._remove_invalid_boxes(detected_boxes, detected_scores,
detected_class_labels))
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels,
groundtruth_is_difficult_lists)
is_class_correctly_detected_in_image = self._compute_cor_loc(
detected_boxes, detected_scores, detected_class_labels,
groundtruth_boxes, groundtruth_class_labels)
return scores, tp_fp_labels, is_class_correctly_detected_in_image
def _compute_cor_loc(self, detected_boxes, detected_scores,
detected_class_labels, groundtruth_boxes,
groundtruth_class_labels):
"""Compute CorLoc score for object detection result.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1], representing
          the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
Returns:
      is_class_correctly_detected_in_image: a numpy integer array of
          shape [C, 1], indicating whether the corresponding class has at least
          one instance correctly detected in the image
"""
is_class_correctly_detected_in_image = np.zeros(
self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
gt_boxes_at_ith_class = groundtruth_boxes[
groundtruth_class_labels == i, :]
detected_boxes_at_ith_class = detected_boxes[
detected_class_labels == i, :]
detected_scores_at_ith_class = detected_scores[detected_class_labels == i]
is_class_correctly_detected_in_image[i] = (
self._compute_is_aclass_correctly_detected_in_image(
detected_boxes_at_ith_class, detected_scores_at_ith_class,
gt_boxes_at_ith_class))
return is_class_correctly_detected_in_image
def _compute_is_aclass_correctly_detected_in_image(
self, detected_boxes, detected_scores, groundtruth_boxes):
"""Compute CorLoc score for a single class.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
Returns:
is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a
class is correctly detected in the image or not
"""
if detected_boxes.size > 0:
if groundtruth_boxes.size > 0:
max_score_id = np.argmax(detected_scores)
detected_boxlist = np_box_list.BoxList(
np.expand_dims(detected_boxes[max_score_id, :], axis=0))
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
if np.max(iou) >= self.matching_iou_threshold:
return 1
return 0
def _compute_tp_fp(self, detected_boxes, detected_scores,
detected_class_labels, groundtruth_boxes,
groundtruth_class_labels, groundtruth_is_difficult_lists):
"""Labels true/false positives of detections of an image across all classes.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
      detected_class_labels: An integer numpy array of shape [N, 1], representing
          the class labels of the detected N object instances.
groundtruth_boxes: A float numpy array of shape [M, 4], representing M
regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
groundtruth_is_difficult_lists: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
Returns:
result_scores: A list of float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
      result_tp_fp_labels: A list of boolean numpy arrays. Each numpy array is of
shape [K, 1], representing K True/False positive label of object
instances detected with class label c
"""
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
      gt_boxes_at_ith_class = groundtruth_boxes[
          groundtruth_class_labels == i, :]
      groundtruth_is_difficult_list_at_ith_class = (
          groundtruth_is_difficult_lists[groundtruth_class_labels == i])
      detected_boxes_at_ith_class = detected_boxes[
          detected_class_labels == i, :]
detected_scores_at_ith_class = detected_scores[detected_class_labels == i]
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes_at_ith_class, detected_scores_at_ith_class,
gt_boxes_at_ith_class, groundtruth_is_difficult_list_at_ith_class)
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
def _remove_invalid_boxes(self, detected_boxes, detected_scores,
detected_class_labels):
valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2],
detected_boxes[:, 1] < detected_boxes[:, 3])
return (detected_boxes[valid_indices, :], detected_scores[valid_indices],
detected_class_labels[valid_indices])
def _compute_tp_fp_for_single_class(self, detected_boxes, detected_scores,
groundtruth_boxes,
groundtruth_is_difficult_list):
"""Labels boxes detected with the same class from the same image as tp/fp.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected box
coordinates
detected_scores: A 1-d numpy array of length N representing classification
score
groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth
box coordinates
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
Returns:
scores: A numpy array representing the detection scores
tp_fp_labels: a boolean numpy array indicating whether a detection is a
true positive.
"""
if detected_boxes.size == 0:
return np.array([], dtype=float), np.array([], dtype=bool)
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(
detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)
scores = detected_boxlist.get_field('scores')
if groundtruth_boxes.size == 0:
return scores, np.zeros(detected_boxlist.num_boxes(), dtype=bool)
gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
max_overlap_gt_ids = np.argmax(iou, axis=1)
is_gt_box_detected = np.zeros(gt_boxlist.num_boxes(), dtype=bool)
tp_fp_labels = np.zeros(detected_boxlist.num_boxes(), dtype=bool)
is_matched_to_difficult_box = np.zeros(
detected_boxlist.num_boxes(), dtype=bool)
for i in range(detected_boxlist.num_boxes()):
gt_id = max_overlap_gt_ids[i]
if iou[i, gt_id] >= self.matching_iou_threshold:
if not groundtruth_is_difficult_list[gt_id]:
if not is_gt_box_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_box_detected[gt_id] = True
else:
is_matched_to_difficult_box[i] = True
return scores[~is_matched_to_difficult_box], tp_fp_labels[
~is_matched_to_difficult_box]
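if __name__ == '__main__':
  # Minimal usage sketch with synthetic boxes (illustrative values only; shapes
  # follow the docstrings above and rely on the object_detection utils imported
  # at the top of this file).
  evaluator = PerImageEvaluation(num_groundtruth_classes=2)
  detected_boxes = np.array([[0., 0., 1., 1.], [0., 0., 0.5, 0.5]], dtype=float)
  detected_scores = np.array([0.8, 0.6], dtype=float)
  detected_class_labels = np.array([0, 1], dtype=int)
  groundtruth_boxes = np.array([[0., 0., 1., 1.]], dtype=float)
  groundtruth_class_labels = np.array([0], dtype=int)
  groundtruth_is_difficult_lists = np.zeros(1, dtype=bool)
  scores, tp_fp_labels, cor_loc = evaluator.compute_object_detection_metrics(
      detected_boxes, detected_scores, detected_class_labels,
      groundtruth_boxes, groundtruth_class_labels,
      groundtruth_is_difficult_lists)
  # The class-0 detection matches its ground truth box (IOU 1.0 >= 0.5), so it
  # is labeled a true positive and cor_loc evaluates to [1, 0].
  print(scores, tp_fp_labels, cor_loc)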
|
|
"""
Control Modjk via the Apache Tomcat "Status" worker
(http://tomcat.apache.org/connectors-doc/reference/status.html)
Below is an example of the configuration needed for this module. This
configuration data can be placed either in :ref:`grains
<targeting-grains>` or :ref:`pillar <salt-pillars>`.
If using grains, this can be accomplished :ref:`statically
<static-custom-grains>` or via a :ref:`grain module <writing-grains>`.
If using pillar, the yaml configuration can be placed directly into a pillar
SLS file, making this both the easier and more dynamic method of configuring
this module.
.. code-block:: yaml
modjk:
default:
url: http://localhost/jkstatus
user: modjk
pass: secret
realm: authentication realm for digest passwords
timeout: 5
otherVhost:
url: http://otherVhost/jkstatus
user: modjk
pass: secret2
realm: authentication realm2 for digest passwords
timeout: 600
"""
import urllib.parse
import urllib.request
def __virtual__():
"""
Always load
"""
return True
def _auth(url, user, passwd, realm):
"""
    Return an authentication handler.
"""
basic = urllib.request.HTTPBasicAuthHandler()
basic.add_password(realm=realm, uri=url, user=user, passwd=passwd)
digest = urllib.request.HTTPDigestAuthHandler()
digest.add_password(realm=realm, uri=url, user=user, passwd=passwd)
return urllib.request.build_opener(basic, digest)
def _do_http(opts, profile="default"):
"""
Make the http request and return the data
"""
ret = {}
url = __salt__["config.get"]("modjk:{}:url".format(profile), "")
user = __salt__["config.get"]("modjk:{}:user".format(profile), "")
passwd = __salt__["config.get"]("modjk:{}:pass".format(profile), "")
realm = __salt__["config.get"]("modjk:{}:realm".format(profile), "")
timeout = __salt__["config.get"]("modjk:{}:timeout".format(profile), "")
if not url:
raise Exception("missing url in profile {}".format(profile))
if user and passwd:
auth = _auth(url=url, realm=realm, user=user, passwd=passwd)
urllib.request.install_opener(auth)
url += "?{}".format(urllib.parse.urlencode(opts))
for line in urllib.request.urlopen(url, timeout=timeout).read().splitlines():
splt = line.split("=", 1)
if splt[0] in ret:
ret[splt[0]] += ",{}".format(splt[1])
else:
ret[splt[0]] = splt[1]
return ret
def _worker_ctl(worker, lbn, vwa, profile="default"):
"""
enable/disable/stop a worker
"""
cmd = {
"cmd": "update",
"mime": "prop",
"w": lbn,
"sw": worker,
"vwa": vwa,
}
return _do_http(cmd, profile)["worker.result.type"] == "OK"
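# Example (illustrative): with the default profile shown in the module docstring,
# disabling worker "node1" in balancer "loadbalancer1" requests a URL such as
#   http://localhost/jkstatus?cmd=update&mime=prop&w=loadbalancer1&sw=node1&vwa=d
# and the call is considered successful when the "prop" formatted response
# contains the line "worker.result.type=OK".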
def version(profile="default"):
"""
Return the modjk version
CLI Examples:
.. code-block:: bash
salt '*' modjk.version
salt '*' modjk.version other-profile
"""
cmd = {
"cmd": "version",
"mime": "prop",
}
return _do_http(cmd, profile)["worker.jk_version"].split("/")[-1]
def get_running(profile="default"):
"""
Get the current running config (not from disk)
CLI Examples:
.. code-block:: bash
salt '*' modjk.get_running
salt '*' modjk.get_running other-profile
"""
cmd = {
"cmd": "list",
"mime": "prop",
}
return _do_http(cmd, profile)
def dump_config(profile="default"):
"""
Dump the original configuration that was loaded from disk
CLI Examples:
.. code-block:: bash
salt '*' modjk.dump_config
salt '*' modjk.dump_config other-profile
"""
cmd = {
"cmd": "dump",
"mime": "prop",
}
return _do_http(cmd, profile)
def list_configured_members(lbn, profile="default"):
"""
Return a list of member workers from the configuration files
CLI Examples:
.. code-block:: bash
salt '*' modjk.list_configured_members loadbalancer1
salt '*' modjk.list_configured_members loadbalancer1 other-profile
"""
config = dump_config(profile)
try:
ret = config["worker.{}.balance_workers".format(lbn)]
except KeyError:
return []
return [_f for _f in ret.strip().split(",") if _f]
def workers(profile="default"):
"""
Return a list of member workers and their status
CLI Examples:
.. code-block:: bash
salt '*' modjk.workers
salt '*' modjk.workers other-profile
"""
config = get_running(profile)
lbn = config["worker.list"].split(",")
worker_list = []
ret = {}
for lb in lbn:
try:
worker_list.extend(
config["worker.{}.balance_workers".format(lb)].split(",")
)
except KeyError:
pass
worker_list = list(set(worker_list))
for worker in worker_list:
ret[worker] = {
"activation": config["worker.{}.activation".format(worker)],
"state": config["worker.{}.state".format(worker)],
}
return ret
def recover_all(lbn, profile="default"):
"""
    Set all the workers in lbn to recover and activate them if they are not active
CLI Examples:
.. code-block:: bash
salt '*' modjk.recover_all loadbalancer1
salt '*' modjk.recover_all loadbalancer1 other-profile
"""
ret = {}
config = get_running(profile)
try:
workers_ = config["worker.{}.balance_workers".format(lbn)].split(",")
except KeyError:
return ret
for worker in workers_:
curr_state = worker_status(worker, profile)
if curr_state["activation"] != "ACT":
worker_activate(worker, lbn, profile)
if not curr_state["state"].startswith("OK"):
worker_recover(worker, lbn, profile)
ret[worker] = worker_status(worker, profile)
return ret
def reset_stats(lbn, profile="default"):
"""
Reset all runtime statistics for the load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.reset_stats loadbalancer1
salt '*' modjk.reset_stats loadbalancer1 other-profile
"""
cmd = {
"cmd": "reset",
"mime": "prop",
"w": lbn,
}
return _do_http(cmd, profile)["worker.result.type"] == "OK"
def lb_edit(lbn, settings, profile="default"):
"""
Edit the loadbalancer settings
Note: http://tomcat.apache.org/connectors-doc/reference/status.html
Data Parameters for the standard Update Action
CLI Examples:
.. code-block:: bash
salt '*' modjk.lb_edit loadbalancer1 "{'vlr': 1, 'vlt': 60}"
salt '*' modjk.lb_edit loadbalancer1 "{'vlr': 1, 'vlt': 60}" other-profile
"""
settings["cmd"] = "update"
settings["mime"] = "prop"
settings["w"] = lbn
return _do_http(settings, profile)["worker.result.type"] == "OK"
def bulk_stop(workers, lbn, profile="default"):
"""
Stop all the given workers in the specific load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.bulk_stop node1,node2,node3 loadbalancer1
salt '*' modjk.bulk_stop node1,node2,node3 loadbalancer1 other-profile
salt '*' modjk.bulk_stop ["node1","node2","node3"] loadbalancer1
salt '*' modjk.bulk_stop ["node1","node2","node3"] loadbalancer1 other-profile
"""
ret = {}
if isinstance(workers, str):
workers = workers.split(",")
for worker in workers:
try:
ret[worker] = worker_stop(worker, lbn, profile)
except Exception: # pylint: disable=broad-except
ret[worker] = False
return ret
def bulk_activate(workers, lbn, profile="default"):
"""
Activate all the given workers in the specific load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.bulk_activate node1,node2,node3 loadbalancer1
salt '*' modjk.bulk_activate node1,node2,node3 loadbalancer1 other-profile
salt '*' modjk.bulk_activate ["node1","node2","node3"] loadbalancer1
salt '*' modjk.bulk_activate ["node1","node2","node3"] loadbalancer1 other-profile
"""
ret = {}
if isinstance(workers, str):
workers = workers.split(",")
for worker in workers:
try:
ret[worker] = worker_activate(worker, lbn, profile)
except Exception: # pylint: disable=broad-except
ret[worker] = False
return ret
def bulk_disable(workers, lbn, profile="default"):
"""
Disable all the given workers in the specific load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.bulk_disable node1,node2,node3 loadbalancer1
salt '*' modjk.bulk_disable node1,node2,node3 loadbalancer1 other-profile
salt '*' modjk.bulk_disable ["node1","node2","node3"] loadbalancer1
salt '*' modjk.bulk_disable ["node1","node2","node3"] loadbalancer1 other-profile
"""
ret = {}
if isinstance(workers, str):
workers = workers.split(",")
for worker in workers:
try:
ret[worker] = worker_disable(worker, lbn, profile)
except Exception: # pylint: disable=broad-except
ret[worker] = False
return ret
def bulk_recover(workers, lbn, profile="default"):
"""
Recover all the given workers in the specific load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1
salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1 other-profile
salt '*' modjk.bulk_recover ["node1","node2","node3"] loadbalancer1
salt '*' modjk.bulk_recover ["node1","node2","node3"] loadbalancer1 other-profile
"""
ret = {}
if isinstance(workers, str):
workers = workers.split(",")
for worker in workers:
try:
ret[worker] = worker_recover(worker, lbn, profile)
except Exception: # pylint: disable=broad-except
ret[worker] = False
return ret
def worker_status(worker, profile="default"):
"""
Return the state of the worker
CLI Examples:
.. code-block:: bash
salt '*' modjk.worker_status node1
salt '*' modjk.worker_status node1 other-profile
"""
config = get_running(profile)
try:
return {
"activation": config["worker.{}.activation".format(worker)],
"state": config["worker.{}.state".format(worker)],
}
except KeyError:
return False
def worker_recover(worker, lbn, profile="default"):
"""
Set the worker to recover
    This will fail if the worker is in the OK state
CLI Examples:
.. code-block:: bash
salt '*' modjk.worker_recover node1 loadbalancer1
salt '*' modjk.worker_recover node1 loadbalancer1 other-profile
"""
cmd = {
"cmd": "recover",
"mime": "prop",
"w": lbn,
"sw": worker,
}
return _do_http(cmd, profile)
def worker_disable(worker, lbn, profile="default"):
"""
Set the worker to disable state in the lbn load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.worker_disable node1 loadbalancer1
salt '*' modjk.worker_disable node1 loadbalancer1 other-profile
"""
return _worker_ctl(worker, lbn, "d", profile)
def worker_activate(worker, lbn, profile="default"):
"""
Set the worker to activate state in the lbn load balancer
CLI Examples:
.. code-block:: bash
salt '*' modjk.worker_activate node1 loadbalancer1
salt '*' modjk.worker_activate node1 loadbalancer1 other-profile
"""
return _worker_ctl(worker, lbn, "a", profile)
def worker_stop(worker, lbn, profile="default"):
"""
Set the worker to stopped state in the lbn load balancer
CLI Examples:
.. code-block:: bash
        salt '*' modjk.worker_stop node1 loadbalancer1
        salt '*' modjk.worker_stop node1 loadbalancer1 other-profile
"""
return _worker_ctl(worker, lbn, "s", profile)
def worker_edit(worker, lbn, settings, profile="default"):
"""
Edit the worker settings
Note: http://tomcat.apache.org/connectors-doc/reference/status.html
Data Parameters for the standard Update Action
CLI Examples:
.. code-block:: bash
salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}"
salt '*' modjk.worker_edit node1 loadbalancer1 "{'vwf': 500, 'vwd': 60}" other-profile
"""
settings["cmd"] = "update"
settings["mime"] = "prop"
settings["w"] = lbn
settings["sw"] = worker
return _do_http(settings, profile)["worker.result.type"] == "OK"
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import netaddr
from nova import exception
def ensure_string_keys(d):
# http://bugs.python.org/issue4978
return dict([(str(k), v) for k, v in d.iteritems()])
class Model(dict):
"""Defines some necessary structures for most of the network models"""
def __repr__(self):
return self.__class__.__name__ + '(' + dict.__repr__(self) + ')'
def set_meta(self, kwargs):
# pull meta out of kwargs if it's there
self['meta'] = kwargs.pop('meta', {})
# update meta with any additional kwargs that may exist
self['meta'].update(kwargs)
class IP(Model):
"""Represents an IP address in Nova"""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
self['address'] = address
self['type'] = type
self['version'] = kwargs.pop('version', None)
self.set_meta(kwargs)
# determine version from address if not passed in
if self['address'] and not self['version']:
try:
self['version'] = netaddr.IPAddress(self['address']).version
except netaddr.AddrFormatError, e:
raise exception.InvalidIpAddressError(self['address'])
def __eq__(self, other):
return self['address'] == other['address']
def is_in_subnet(self, subnet):
if self['address'] and subnet['cidr']:
return (netaddr.IPAddress(self['address']) in
netaddr.IPNetwork(subnet['cidr']))
else:
return False
@classmethod
def hydrate(cls, ip):
if ip:
return IP(**ensure_string_keys(ip))
return None
class FixedIP(IP):
"""Represents a Fixed IP address in Nova"""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
if not self['type']:
self['type'] = 'fixed'
def add_floating_ip(self, floating_ip):
if floating_ip not in self['floating_ips']:
self['floating_ips'].append(floating_ip)
def floating_ip_addresses(self):
return [ip['address'] for ip in self['floating_ips']]
@classmethod
def hydrate(cls, fixed_ip):
fixed_ip = FixedIP(**ensure_string_keys(fixed_ip))
fixed_ip['floating_ips'] = [IP.hydrate(floating_ip)
for floating_ip in fixed_ip['floating_ips']]
return fixed_ip
class Route(Model):
"""Represents an IP Route in Nova"""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
self['cidr'] = cidr
self['gateway'] = gateway
self['interface'] = interface
self.set_meta(kwargs)
@classmethod
def hydrate(cls, route):
route = Route(**ensure_string_keys(route))
route['gateway'] = IP.hydrate(route['gateway'])
return route
class Subnet(Model):
"""Represents a Subnet in Nova"""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
self['cidr'] = cidr
self['dns'] = dns or []
self['gateway'] = gateway
self['ips'] = ips or []
self['routes'] = routes or []
self['version'] = kwargs.pop('version', None)
self.set_meta(kwargs)
if self['cidr'] and not self['version']:
self['version'] = netaddr.IPNetwork(self['cidr']).version
def __eq__(self, other):
return self['cidr'] == other['cidr']
def add_route(self, new_route):
if new_route not in self['routes']:
self['routes'].append(new_route)
def add_dns(self, dns):
if dns not in self['dns']:
self['dns'].append(dns)
def add_ip(self, ip):
if ip not in self['ips']:
self['ips'].append(ip)
def as_netaddr(self):
"""Convience function to get cidr as a netaddr object"""
return netaddr.IPNetwork(self['cidr'])
@classmethod
def hydrate(cls, subnet):
subnet = Subnet(**ensure_string_keys(subnet))
subnet['dns'] = [IP.hydrate(dns) for dns in subnet['dns']]
subnet['ips'] = [FixedIP.hydrate(ip) for ip in subnet['ips']]
subnet['routes'] = [Route.hydrate(route) for route in subnet['routes']]
subnet['gateway'] = IP.hydrate(subnet['gateway'])
return subnet
class Network(Model):
"""Represents a Network in Nova"""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
self['id'] = id
self['bridge'] = bridge
self['label'] = label
self['subnets'] = subnets or []
self.set_meta(kwargs)
def add_subnet(self, subnet):
if subnet not in self['subnets']:
self['subnets'].append(subnet)
@classmethod
def hydrate(cls, network):
if network:
network = Network(**ensure_string_keys(network))
network['subnets'] = [Subnet.hydrate(subnet)
for subnet in network['subnets']]
return network
class VIF(Model):
"""Represents a Virtual Interface in Nova"""
def __init__(self, id=None, address=None, network=None, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self.set_meta(kwargs)
def __eq__(self, other):
return self['id'] == other['id']
def fixed_ips(self):
return [fixed_ip for subnet in self['network']['subnets']
for fixed_ip in subnet['ips']]
def floating_ips(self):
return [floating_ip for fixed_ip in self.fixed_ips()
for floating_ip in fixed_ip['floating_ips']]
def labeled_ips(self):
"""Returns the list of all IPs
The return value looks like this flat structure::
{'network_label': 'my_network',
'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
'ips': [{'address': '123.123.123.123',
'version': 4,
                      'type': 'fixed',
'meta': {...}},
{'address': '124.124.124.124',
'version': 4,
'type': 'floating',
'meta': {...}},
{'address': 'fe80::4',
'version': 6,
'type': 'fixed',
'meta': {...}}]
"""
if self['network']:
            # remove unnecessary fields on fixed_ips
ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
for ip in ips:
# remove floating ips from IP, since this is a flat structure
# of all IPs
del ip['meta']['floating_ips']
# add floating ips to list (if any)
ips.extend(self.floating_ips())
return {'network_label': self['network']['label'],
'network_id': self['network']['id'],
'ips': ips}
return []
@classmethod
def hydrate(cls, vif):
vif = VIF(**ensure_string_keys(vif))
vif['network'] = Network.hydrate(vif['network'])
return vif
class NetworkInfo(list):
"""Stores and manipulates network information for a Nova instance"""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
"""Returns all fixed_ips without floating_ips attached"""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
"""Returns all floating_ips"""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
def hydrate(cls, network_info):
if isinstance(network_info, basestring):
network_info = json.loads(network_info)
return NetworkInfo([VIF.hydrate(vif) for vif in network_info])
def as_cache(self):
return json.dumps(self)
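# Example (illustrative values): building a model and round-tripping it through
# the JSON cache representation:
#
#     subnet = Subnet(cidr='192.168.1.0/24', gateway=IP(address='192.168.1.1'),
#                     ips=[FixedIP(address='192.168.1.100')])
#     network = Network(id='net-1', bridge='br100', label='private',
#                       subnets=[subnet])
#     vif = VIF(id='vif-1', address='ca:fe:de:ad:be:ef', network=network)
#     cache = NetworkInfo([vif]).as_cache()   # JSON string
#     restored = NetworkInfo.hydrate(cache)   # back into model objects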
|
|
# -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Third party imports
import numpy as np
from scipy import signal
# Local application imports
from mosqito.functions.loudness_zwicker.square_and_smooth import square_and_smooth
def calc_third_octave_levels(sig, fs):
"""3rd octave filtering, squaring, smoothing, level calculation and
    downsampling to a temporal resolution of 0.5 ms, i.e. a sampling rate of 2 kHz
See ISO 532-1 section 6.3
Parameters
----------
sig : numpy.ndarray
        time signal sampled at 48 kHz [Pa]
fs : int
time signal sampling frequency
Outputs
-------
    third_octave_level : numpy.ndarray
        Sound pressure levels [dB] per third-octave band and time step
    freq : list
        Preferred third-octave-band center frequencies [Hz]
    time_axis : numpy.ndarray
        Time axis [s] of the decimated output
"""
# Sampling frequency shall be equal to 48 kHz (as per ISO 532)
if fs != 48000:
raise ValueError("""ERROR: Sampling frequency shall be equal to 48 kHz""")
# Constants
n_level_band = 28
n_filter_coeff = 6
dec_factor = int(fs / 2000)
# Initialisation
coeff = np.zeros(n_filter_coeff)
# Filter coefficients of one-third-octave-band filters (reference
# table)
# ISO 532-1 Table A.1
third_octave_filter_ref = np.array(
[[1, 2, 1, 1, -2, 1], [1, 0, -1, 1, -2, 1], [1, -2, 1, 1, -2, 1]]
)
# Filter coefficients of one-third-octave-band filters (difference to
# reference table for 28 one-third-octave-band filters)
# ISO 532-1 Table A.2
third_octave_filter = np.array(
[
[
[0, 0, 0, 0, -6.70260e-004, 6.59453e-004],
[0, 0, 0, 0, -3.75071e-004, 3.61926e-004],
[0, 0, 0, 0, -3.06523e-004, 2.97634e-004],
],
[
[0, 0, 0, 0, -8.47258e-004, 8.30131e-004],
[0, 0, 0, 0, -4.76448e-004, 4.55616e-004],
[0, 0, 0, 0, -3.88773e-004, 3.74685e-004],
],
[
[0, 0, 0, 0, -1.07210e-003, 1.04496e-003],
[0, 0, 0, 0, -6.06567e-004, 5.73553e-004],
[0, 0, 0, 0, -4.94004e-004, 4.71677e-004],
],
[
[0, 0, 0, 0, -1.35836e-003, 1.31535e-003],
[0, 0, 0, 0, -7.74327e-004, 7.22007e-004],
[0, 0, 0, 0, -6.29154e-004, 5.93771e-004],
],
[
[0, 0, 0, 0, -1.72380e-003, 1.65564e-003],
[0, 0, 0, 0, -9.91780e-004, 9.08866e-004],
[0, 0, 0, 0, -8.03529e-004, 7.47455e-004],
],
[
[0, 0, 0, 0, -2.19188e-003, 2.08388e-003],
[0, 0, 0, 0, -1.27545e-003, 1.14406e-003],
[0, 0, 0, 0, -1.02976e-003, 9.40900e-004],
],
[
[0, 0, 0, 0, -2.79386e-003, 2.62274e-003],
[0, 0, 0, 0, -1.64828e-003, 1.44006e-003],
[0, 0, 0, 0, -1.32520e-003, 1.18438e-003],
],
[
[0, 0, 0, 0, -3.57182e-003, 3.30071e-003],
[0, 0, 0, 0, -2.14252e-003, 1.81258e-003],
[0, 0, 0, 0, -1.71397e-003, 1.49082e-003],
],
[
[0, 0, 0, 0, -4.58305e-003, 4.15355e-003],
[0, 0, 0, 0, -2.80413e-003, 2.28135e-003],
[0, 0, 0, 0, -2.23006e-003, 1.87646e-003],
],
[
[0, 0, 0, 0, -5.90655e-003, 5.22622e-003],
[0, 0, 0, 0, -3.69947e-003, 2.87118e-003],
[0, 0, 0, 0, -2.92205e-003, 2.36178e-003],
],
[
[0, 0, 0, 0, -7.65243e-003, 6.57493e-003],
[0, 0, 0, 0, -4.92540e-003, 3.61318e-003],
[0, 0, 0, 0, -3.86007e-003, 2.97240e-003],
],
[
[0, 0, 0, 0, -1.00023e-002, 8.29610e-003],
[0, 0, 0, 0, -6.63788e-003, 4.55999e-003],
[0, 0, 0, 0, -5.15982e-003, 3.75306e-003],
],
[
[0, 0, 0, 0, -1.31230e-002, 1.04220e-002],
[0, 0, 0, 0, -9.02274e-003, 5.73132e-003],
[0, 0, 0, 0, -6.94543e-003, 4.71734e-003],
],
[
[0, 0, 0, 0, -1.73693e-002, 1.30947e-002],
[0, 0, 0, 0, -1.24176e-002, 7.20526e-003],
[0, 0, 0, 0, -9.46002e-003, 5.93145e-003],
],
[
[0, 0, 0, 0, -2.31934e-002, 1.64308e-002],
[0, 0, 0, 0, -1.73009e-002, 9.04761e-003],
[0, 0, 0, 0, -1.30358e-002, 7.44926e-003],
],
[
[0, 0, 0, 0, -3.13292e-002, 2.06370e-002],
[0, 0, 0, 0, -2.44342e-002, 1.13731e-002],
[0, 0, 0, 0, -1.82108e-002, 9.36778e-003],
],
[
[0, 0, 0, 0, -4.28261e-002, 2.59325e-002],
[0, 0, 0, 0, -3.49619e-002, 1.43046e-002],
[0, 0, 0, 0, -2.57855e-002, 1.17912e-002],
],
[
[0, 0, 0, 0, -5.91733e-002, 3.25054e-002],
[0, 0, 0, 0, -5.06072e-002, 1.79513e-002],
[0, 0, 0, 0, -3.69401e-002, 1.48094e-002],
],
[
[0, 0, 0, 0, -8.26348e-002, 4.05894e-002],
[0, 0, 0, 0, -7.40348e-002, 2.24476e-002],
[0, 0, 0, 0, -5.34977e-002, 1.85371e-002],
],
[
[0, 0, 0, 0, -1.17018e-001, 5.08116e-002],
[0, 0, 0, 0, -1.09516e-001, 2.81387e-002],
[0, 0, 0, 0, -7.85097e-002, 2.32872e-002],
],
[
[0, 0, 0, 0, -1.67714e-001, 6.37872e-002],
[0, 0, 0, 0, -1.63378e-001, 3.53729e-002],
[0, 0, 0, 0, -1.16419e-001, 2.93723e-002],
],
[
[0, 0, 0, 0, -2.42528e-001, 7.98576e-002],
[0, 0, 0, 0, -2.45161e-001, 4.43370e-002],
[0, 0, 0, 0, -1.73972e-001, 3.70015e-002],
],
[
[0, 0, 0, 0, -3.53142e-001, 9.96330e-002],
[0, 0, 0, 0, -3.69163e-001, 5.53535e-002],
[0, 0, 0, 0, -2.61399e-001, 4.65428e-002],
],
[
[0, 0, 0, 0, -5.16316e-001, 1.24177e-001],
[0, 0, 0, 0, -5.55473e-001, 6.89403e-002],
[0, 0, 0, 0, -3.93998e-001, 5.86715e-002],
],
[
[0, 0, 0, 0, -7.56635e-001, 1.55023e-001],
[0, 0, 0, 0, -8.34281e-001, 8.58123e-002],
[0, 0, 0, 0, -5.94547e-001, 7.43960e-002],
],
[
[0, 0, 0, 0, -1.10165e000, 1.91713e-001],
[0, 0, 0, 0, -1.23939e000, 1.05243e-001],
[0, 0, 0, 0, -8.91666e-001, 9.40354e-002],
],
[
[0, 0, 0, 0, -1.58477e000, 2.39049e-001],
[0, 0, 0, 0, -1.80505e000, 1.28794e-001],
[0, 0, 0, 0, -1.32500e000, 1.21333e-001],
],
[
[0, 0, 0, 0, -2.50630e000, 1.42308e-001],
[0, 0, 0, 0, -2.19464e000, 2.76470e-001],
[0, 0, 0, 0, -1.90231e000, 1.47304e-001],
],
]
)
# Filter gain values
# ISO 532-1 Table A.2
filter_gain = np.array(
[
4.30764e-011,
8.59340e-011,
1.71424e-010,
3.41944e-010,
6.82035e-010,
1.36026e-009,
2.71261e-009,
5.40870e-009,
1.07826e-008,
2.14910e-008,
4.28228e-008,
8.54316e-008,
1.70009e-007,
3.38215e-007,
6.71990e-007,
1.33531e-006,
2.65172e-006,
5.25477e-006,
1.03780e-005,
2.04870e-005,
4.05198e-005,
7.97914e-005,
1.56511e-004,
3.04954e-004,
5.99157e-004,
1.16544e-003,
2.27488e-003,
3.91006e-003,
]
)
# Definition of the range of preferred filter center frequency
freq = [
25,
31.5,
40,
50,
63,
80,
100,
125,
160,
200,
250,
315,
400,
500,
630,
800,
1000,
1250,
1600,
2000,
2500,
3150,
4000,
5000,
6300,
8000,
10000,
12500,
]
n_time = len(sig[::dec_factor])
time_axis = np.linspace(0, len(sig) / fs, num=n_time)
third_octave_level = np.zeros((n_level_band, n_time))
for i_bands in range(n_level_band):
# Initialisation
tiny_value = 10 ** -12
i_ref = 4 * 10 ** -10
        # 2nd order filtering (see ISO 532-1 section 6.3 and A.2)
coeff = third_octave_filter_ref - third_octave_filter[i_bands, :, :]
sig_filt = filter_gain[i_bands] * signal.sosfilt(coeff, sig)
# Calculate center frequency of filter
center_freq = 10 ** ((i_bands - 16) / 10) * 1000
# Squaring and smoothing of filtered signal
sig_filt = square_and_smooth(sig_filt, center_freq, 48000)
# SPL calculation and decimation
third_octave_level[i_bands, :] = 10 * np.log10(
(sig_filt[::dec_factor] + tiny_value) / i_ref
)
return third_octave_level, freq, time_axis
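# Example usage (synthetic signal, for illustration only):
#
#     import numpy as np
#     fs = 48000
#     sig = np.random.randn(fs)   # 1 s of noise sampled at 48 kHz [Pa]
#     level, freq, t = calc_third_octave_levels(sig, fs)
#     level.shape                 # (28, 2000): 28 bands at the 2 kHz output rate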
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image-related utility functions."""
import math
from typing import Tuple
from absl import logging
import cv2
import imageio
import numpy as np
from PIL import Image
from hypernerf import gpath
from hypernerf import types
UINT8_MAX = 255
UINT16_MAX = 65535
def make_divisible(image: np.ndarray, divisor: int) -> np.ndarray:
"""Trim the image if not divisible by the divisor."""
height, width = image.shape[:2]
if height % divisor == 0 and width % divisor == 0:
return image
new_height = height - height % divisor
new_width = width - width % divisor
return image[:new_height, :new_width]
def downsample_image(image: np.ndarray, scale: int) -> np.ndarray:
"""Downsamples the image by an integer factor to prevent artifacts."""
if scale == 1:
return image
height, width = image.shape[:2]
if height % scale > 0 or width % scale > 0:
raise ValueError(f'Image shape ({height},{width}) must be divisible by the'
f' scale ({scale}).')
out_height, out_width = height // scale, width // scale
  resized = cv2.resize(image, (out_width, out_height), interpolation=cv2.INTER_AREA)
return resized
def upsample_image(image: np.ndarray, scale: int) -> np.ndarray:
"""Upsamples the image by an integer factor."""
if scale == 1:
return image
height, width = image.shape[:2]
out_height, out_width = height * scale, width * scale
  resized = cv2.resize(image, (out_width, out_height), interpolation=cv2.INTER_AREA)
return resized
def reshape_image(image: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
"""Reshapes the image to the given shape."""
out_height, out_width = shape
return cv2.resize(
image, (out_width, out_height), interpolation=cv2.INTER_AREA)
def rescale_image(image: np.ndarray, scale_factor: float) -> np.ndarray:
"""Resize an image by a scale factor, using integer resizing if possible."""
scale_factor = float(scale_factor)
if scale_factor <= 0.0:
    raise ValueError('scale_factor must be a positive number.')
if scale_factor == 1.0:
return image
height, width = image.shape[:2]
if scale_factor.is_integer():
return upsample_image(image, int(scale_factor))
inv_scale = 1.0 / scale_factor
if (inv_scale.is_integer() and (scale_factor * height).is_integer() and
(scale_factor * width).is_integer()):
return downsample_image(image, int(inv_scale))
logging.warning(
'resizing image by non-integer factor %f, this may lead to artifacts.',
scale_factor)
height, width = image.shape[:2]
out_height = math.ceil(height * scale_factor)
out_height -= out_height % 2
out_width = math.ceil(width * scale_factor)
out_width -= out_width % 2
return reshape_image(image, (out_height, out_width))
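# Illustrative usage sketch (added for clarity, not part of the original module):
# rescale_image takes the exact integer up/downsampling paths whenever it can and
# only falls back to reshape_image (with a warning) for fractional factors.
#
#   img = np.zeros((480, 640, 3), dtype=np.float32)
#   rescale_image(img, 2.0).shape   # -> (960, 1280, 3), via upsample_image
#   rescale_image(img, 0.5).shape   # -> (240, 320, 3), via downsample_image
#   rescale_image(img, 0.3)         # -> fractional path, logs a warning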
def crop_image(image, left=0, right=0, top=0, bottom=0):
pad_width = [max(0, -x) for x in [top, bottom, left, right]]
if any(pad_width):
image = np.pad(image, pad_width=pad_width, mode='constant')
h, w = image.shape[:2]
crop_coords = [max(0, x) for x in (top, bottom, left, right)]
return image[crop_coords[0]:h - crop_coords[1],
crop_coords[2]:w - crop_coords[3], :]
def variance_of_laplacian(image: np.ndarray) -> np.ndarray:
"""Compute the variance of the Laplacian which measure the focus."""
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return cv2.Laplacian(gray, cv2.CV_64F).var()
def image_to_uint8(image: np.ndarray) -> np.ndarray:
"""Convert the image to a uint8 array."""
if image.dtype == np.uint8:
return image
if not issubclass(image.dtype.type, np.floating):
raise ValueError(
f'Input image should be a floating type but is of type {image.dtype!r}')
return (image * UINT8_MAX).clip(0.0, UINT8_MAX).astype(np.uint8)
def image_to_uint16(image: np.ndarray) -> np.ndarray:
"""Convert the image to a uint16 array."""
if image.dtype == np.uint16:
return image
if not issubclass(image.dtype.type, np.floating):
raise ValueError(
f'Input image should be a floating type but is of type {image.dtype!r}')
return (image * UINT16_MAX).clip(0.0, UINT16_MAX).astype(np.uint16)
def image_to_float32(image: np.ndarray) -> np.ndarray:
"""Convert the image to a float32 array and scale values appropriately."""
if image.dtype == np.float32:
return image
dtype = image.dtype
image = image.astype(np.float32)
if dtype == np.uint8:
return image / UINT8_MAX
elif dtype == np.uint16:
return image / UINT16_MAX
elif dtype == np.float64:
return image
elif dtype == np.float16:
return image
raise ValueError(f'Not sure how to handle dtype {dtype}')
def load_image(path: types.PathType) -> np.ndarray:
"""Reads an image."""
if not isinstance(path, gpath.GPath):
path = gpath.GPath(path)
with path.open('rb') as f:
return imageio.imread(f)
def save_image(path: types.PathType, image: np.ndarray) -> None:
"""Saves the image to disk or gfile."""
if not isinstance(path, gpath.GPath):
path = gpath.GPath(path)
if not path.parent.exists():
path.parent.mkdir(exist_ok=True, parents=True)
with path.open('wb') as f:
image = Image.fromarray(np.asarray(image))
image.save(f, format=path.suffix.lstrip('.'))
def save_depth(path: types.PathType, depth: np.ndarray) -> None:
save_image(path, image_to_uint16(depth / 1000.0))
def load_depth(path: types.PathType) -> np.ndarray:
depth = load_image(path)
if depth.dtype != np.uint16:
raise ValueError('Depth image must be of type uint16.')
return image_to_float32(depth) * 1000.0
def checkerboard(h, w, size=8, true_val=1.0, false_val=0.0):
"""Creates a checkerboard pattern with height h and width w."""
i = int(math.ceil(h / (size * 2)))
j = int(math.ceil(w / (size * 2)))
pattern = np.kron([[1, 0] * j, [0, 1] * j] * i,
np.ones((size, size)))[:h, :w]
true = np.full_like(pattern, fill_value=true_val)
false = np.full_like(pattern, fill_value=false_val)
return np.where(pattern > 0, true, false)
def pad_image(image, pad=0, pad_mode='constant', pad_value=0.0):
"""Pads a batched image array."""
batch_shape = image.shape[:-3]
padding = [
*[(0, 0) for _ in batch_shape],
(pad, pad), (pad, pad), (0, 0),
]
if pad_mode == 'constant':
return np.pad(image, padding, pad_mode, constant_values=pad_value)
else:
return np.pad(image, padding, pad_mode)
def split_tiles(image, tile_size):
"""Splits the image into tiles of size `tile_size`."""
# The copy below is necessary because the strided view relies on a contiguous
# memory layout.
if image.ndim == 2:
image = image[..., None]
image = np.array(image)
image = make_divisible(image, tile_size).copy()
height = width = tile_size
nrows, ncols, depth = image.shape
stride = image.strides
nrows, m = divmod(nrows, height)
ncols, n = divmod(ncols, width)
if m != 0 or n != 0:
raise ValueError('Image must be divisible by tile size.')
return np.lib.stride_tricks.as_strided(
np.ravel(image),
shape=(nrows, ncols, height, width, depth),
strides=(height * stride[0], width * stride[1], *stride),
writeable=False)
def join_tiles(tiles):
"""Reconstructs the image from tiles."""
return np.concatenate(np.concatenate(tiles, 1), 1)
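# Illustrative round-trip sketch (added for clarity, not part of the original module):
# for an image whose sides are exact multiples of the tile size, join_tiles undoes
# split_tiles.
#
#   img = np.arange(16 * 16 * 3, dtype=np.float32).reshape(16, 16, 3)
#   tiles = split_tiles(img, 8)                # shape (2, 2, 8, 8, 3)
#   np.array_equal(join_tiles(tiles), img)     # True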
def make_grid(batch, grid_height=None, zoom=1, old_buffer=None, border_size=1):
"""Creates a grid out an image batch.
Args:
batch: numpy array of shape [batch_size, height, width, n_channels]. The
data can either be float in [0, 1] or int in [0, 255]. If the data has
only 1 channel it will be converted to a grey 3 channel image.
grid_height: optional int, number of rows to have. If not given, it is
set so that the output is a square. If -1, then tiling will only be
vertical.
zoom: optional int, how much to zoom the input. Default is no zoom.
old_buffer: Buffer to write grid into if possible. If not set, or if shape
doesn't match, we create a new buffer.
border_size: int specifying the white spacing between the images.
Returns:
A numpy array corresponding to the full grid, with 3 channels and values
in the [0, 255] range.
Raises:
ValueError: if the n_channels is not one of [1, 3].
"""
batch_size, height, width, n_channels = batch.shape
if grid_height is None:
n = int(math.ceil(math.sqrt(batch_size)))
grid_height = n
grid_width = n
elif grid_height == -1:
grid_height = batch_size
grid_width = 1
else:
grid_width = int(math.ceil(batch_size/grid_height))
if n_channels == 1:
batch = np.tile(batch, (1, 1, 1, 3))
n_channels = 3
if n_channels != 3:
raise ValueError('Image batch must have either 1 or 3 channels, but '
'was {}'.format(n_channels))
# We create the numpy buffer if we don't have an old buffer or if the size has
# changed.
shape = (height * grid_height + border_size * (grid_height - 1),
width * grid_width + border_size * (grid_width - 1),
n_channels)
if old_buffer is not None and old_buffer.shape == shape:
buf = old_buffer
else:
buf = np.full(shape, 255, dtype=np.uint8)
multiplier = 1 if np.issubdtype(batch.dtype, np.integer) else 255
for k in range(batch_size):
i = k // grid_width
j = k % grid_width
arr = batch[k]
x, y = i * (height + border_size), j * (width + border_size)
buf[x:x + height, y:y + width, :] = np.clip(multiplier * arr,
0, 255).astype(np.uint8)
if zoom > 1:
buf = buf.repeat(zoom, axis=0).repeat(zoom, axis=1)
return buf
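# Illustrative sketch (added for clarity, not part of the original module): with the
# default square layout, an 8-image batch of 32x32 RGB images gives a 3x3 grid with
# 1-pixel white borders, i.e. a (3*32 + 2, 3*32 + 2, 3) = (98, 98, 3) uint8 buffer.
#
#   batch = np.random.rand(8, 32, 32, 3)
#   make_grid(batch).shape   # -> (98, 98, 3)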
|
|
# Copyright 2000-2010 Michael Hudson-Doyle <[email protected]>
# Antonio Cuni
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from pyrepl import commands, reader
from pyrepl.reader import Reader
def prefix(wordlist, j = 0):
d = {}
i = j
try:
while 1:
for word in wordlist:
d[word[i]] = 1
if len(d) > 1:
return wordlist[0][j:i]
i += 1
d = {}
except IndexError:
return wordlist[0][j:i]
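# Illustrative sketch (added for clarity, not part of the original module): prefix()
# returns the longest common prefix of the word list, starting at offset j.
#
#   prefix(['complete', 'completion', 'completely'])   # -> 'complet'
#   prefix(['foo', 'bar'])                             # -> ''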
import re
def stripcolor(s):
return stripcolor.regexp.sub('', s)
stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]")
def real_len(s):
return len(stripcolor(s))
def left_align(s, maxlen):
stripped = stripcolor(s)
if len(stripped) > maxlen:
# too bad, we remove the color
return stripped[:maxlen]
padding = maxlen - len(stripped)
return s + ' '*padding
def build_menu(cons, wordlist, start, use_brackets, sort_in_column):
if use_brackets:
item = "[ %s ]"
padding = 4
else:
item = "%s "
padding = 2
maxlen = min(max(map(real_len, wordlist)), cons.width - padding)
cols = cons.width / (maxlen + padding)
rows = (len(wordlist) - 1)/cols + 1
if sort_in_column:
# sort_in_column=False (default) sort_in_column=True
# A B C A D G
# D E F B E
# G C F
#
# "fill" the table with empty words, so we always have the same amout
# of rows for each column
missing = cols*rows - len(wordlist)
wordlist = wordlist + ['']*missing
indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))]
wordlist = [wordlist[i] for i in indexes]
menu = []
i = start
for r in range(rows):
row = []
for col in range(cols):
row.append(item % left_align(wordlist[i], maxlen))
i += 1
if i >= len(wordlist):
break
menu.append( ''.join(row) )
if i >= len(wordlist):
i = 0
break
if r + 5 > cons.height:
menu.append(" %d more... "%(len(wordlist) - i))
break
return menu, i
# this gets somewhat user interface-y, and as a result the logic gets
# very convoluted.
#
# To summarise the summary of the summary:- people are a problem.
# -- The Hitch-Hikers Guide to the Galaxy, Episode 12
#### Desired behaviour of the completions commands.
# the considerations are:
# (1) how many completions are possible
# (2) whether the last command was a completion
# (3) if we can assume that the completer is going to return the same set of
# completions: this is controlled by the ``assume_immutable_completions``
# variable on the reader, which is True by default to match the historical
# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match
# more closely readline's semantics (this is needed e.g. by
# fancycompleter)
#
# if there's no possible completion, beep at the user and point this out.
# this is easy.
#
# if there's only one possible completion, stick it in. if the last thing
# user did was a completion, point out that he isn't getting anywhere, but
# only if the ``assume_immutable_completions`` is True.
#
# now it gets complicated.
#
# for the first press of a completion key:
# if there's a common prefix, stick it in.
# irrespective of whether anything got stuck in, if the word is now
# complete, show the "complete but not unique" message
# if there's no common prefix and if the word is not now complete,
# beep.
# common prefix -> yes no
# word complete \/
# yes "cbnu" "cbnu"
# no - beep
# for the second bang on the completion key
# there will necessarily be no common prefix
# show a menu of the choices.
# for subsequent bangs, rotate the menu around (if there are sufficient
# choices).
class complete(commands.Command):
def do(self):
r = self.reader
stem = r.get_stem()
if r.assume_immutable_completions and \
r.last_command_is(self.__class__):
completions = r.cmpltn_menu_choices
else:
r.cmpltn_menu_choices = completions = \
r.get_completions(stem)
if len(completions) == 0:
r.error("no matches")
elif len(completions) == 1:
if r.assume_immutable_completions and \
len(completions[0]) == len(stem) and \
r.last_command_is(self.__class__):
r.msg = "[ sole completion ]"
r.dirty = 1
r.insert(completions[0][len(stem):])
else:
p = prefix(completions, len(stem))
if p != '':
r.insert(p)
if r.last_command_is(self.__class__):
if not r.cmpltn_menu_vis:
r.cmpltn_menu_vis = 1
r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
r.console, completions, r.cmpltn_menu_end,
r.use_brackets, r.sort_in_column)
r.dirty = 1
elif stem + p in completions:
r.msg = "[ complete but not unique ]"
r.dirty = 1
else:
r.msg = "[ not unique ]"
r.dirty = 1
class self_insert(commands.self_insert):
def do(self):
commands.self_insert.do(self)
r = self.reader
if r.cmpltn_menu_vis:
stem = r.get_stem()
if len(stem) < 1:
r.cmpltn_reset()
else:
completions = [w for w in r.cmpltn_menu_choices
if w.startswith(stem)]
if completions:
r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
r.console, completions, 0,
r.use_brackets, r.sort_in_column)
else:
r.cmpltn_reset()
class CompletingReader(Reader):
"""Adds completion support
Adds instance variables:
* cmpltn_menu - the list of menu lines currently shown below the buffer
* cmpltn_menu_vis - whether the completion menu is currently visible
* cmpltn_menu_end - index used to rotate the menu on repeated completion presses
* cmpltn_menu_choices - the cached completions for the current stem
"""
# see the comment for the complete command
assume_immutable_completions = True
use_brackets = True # display completions inside []
sort_in_column = False
def collect_keymap(self):
return super(CompletingReader, self).collect_keymap() + (
(r'\t', 'complete'),)
def __init__(self, console):
super(CompletingReader, self).__init__(console)
self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"]
self.cmpltn_menu_vis = 0
self.cmpltn_menu_end = 0
for c in [complete, self_insert]:
self.commands[c.__name__] = c
self.commands[c.__name__.replace('_', '-')] = c
def after_command(self, cmd):
super(CompletingReader, self).after_command(cmd)
if not isinstance(cmd, self.commands['complete']) \
and not isinstance(cmd, self.commands['self_insert']):
self.cmpltn_reset()
def calc_screen(self):
screen = super(CompletingReader, self).calc_screen()
if self.cmpltn_menu_vis:
ly = self.lxy[1]
screen[ly:ly] = self.cmpltn_menu
self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu)
self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu)
return screen
def finish(self):
super(CompletingReader, self).finish()
self.cmpltn_reset()
def cmpltn_reset(self):
self.cmpltn_menu = []
self.cmpltn_menu_vis = 0
self.cmpltn_menu_end = 0
self.cmpltn_menu_choices = []
def get_stem(self):
st = self.syntax_table
SW = reader.SYNTAX_WORD
b = self.buffer
p = self.pos - 1
while p >= 0 and st.get(b[p], SW) == SW:
p -= 1
return u''.join(b[p+1:self.pos])
def get_completions(self, stem):
return []
def test():
class TestReader(CompletingReader):
def get_completions(self, stem):
return [s for l in map(lambda x:x.split(),self.history)
for s in l if s and s.startswith(stem)]
reader = TestReader()
reader.ps1 = "c**> "
reader.ps2 = "c/*> "
reader.ps3 = "c|*> "
reader.ps4 = "c\*> "
while reader.readline():
pass
if __name__=='__main__':
test()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import Variable
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ["copy_op_to_graph", "copy_variable_to_graph", "get_copied_op"]
def copy_variable_to_graph(org_instance, to_graph, scope=""):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
"""
if not isinstance(org_instance, Variable):
raise TypeError(str(org_instance) + " is not a Variable")
#The name of the new variable
if scope != "":
new_name = (scope + '/' +
org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or
scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
#See if its trainable.
trainable = (org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
#Get the initial value
with org_instance.graph.as_default():
temp_session = Session()
init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = Variable(init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
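#Illustrative usage sketch (added for clarity, not part of the original module);
#assumes TF 1.x-style graphs as used throughout this file:
#
#  g1, g2 = ops.Graph(), ops.Graph()
#  with g1.as_default():
#      v = Variable([1.0, 2.0], name='v')
#  v_copy = copy_variable_to_graph(v, g2, scope='copied')
#  # v_copy lives in g2 and is named 'copied/v:0'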
def copy_op_to_graph(org_instance, to_graph, variables,
scope=""):
"""Returns a copy of an operation from another Graph under a specified scope.
Given an `Operation` `org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate the `org_instance`, is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
variables: An iterable of the already-copied `Variable` instances (created in `to_graph`, e.g. via `copy_variable_to_graph`) needed to evaluate `org_instance`.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
#corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(
copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(new_name,
allow_tensor=True,
allow_operation=True)
return already_present
except:
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If its a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph,
variables, scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.control_inputs]
#If it has inputs, call this function recursively on each.
new_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.inputs]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op._node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
#Its unique to every _type_ of Operation.
op_def = deepcopy(op._op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def,
to_graph,
new_inputs,
output_types,
new_control_inputs,
input_types,
new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._record_op_seen_by_control_dependencies(new_op)
for device_function in reversed(to_graph._device_function_stack):
new_op._set_device(device_function(new_op))
return new_op
else:
raise TypeError("Could not copy instance: " + str(org_instance))
def get_copied_op(org_instance, graph, scope=""):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(new_name, allow_tensor=True,
allow_operation=True)
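#Illustrative usage sketch (added for clarity, not part of the original module):
#copy the variables first, then the op, then look the copy up later by name.
#The graph and tensor names below are hypothetical.
#
#  copied_vars = [copy_variable_to_graph(v, to_graph, scope='copied')
#                 for v in org_graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]
#  new_out = copy_op_to_graph(out_tensor, to_graph, copied_vars, scope='copied')
#  same_op = get_copied_op(out_tensor.op, to_graph, scope='copied')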
|
|
#!/usr/bin/env python
import sys
import os
import re
from collections import OrderedDict
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../lib')
sys.path.append(libdir)
import utils
import jdecode
datadir = os.path.realpath(os.path.join(libdir, '../data'))
gramdir = os.path.join(datadir, 'ngrams')
compute_ngrams = False
gramdicts = {}
if os.path.isdir(gramdir):
import keydiff
compute_ngrams = True
for fname in os.listdir(gramdir):
suffixes = re.findall(r'\.[0-9]*g$', fname)
if suffixes:
grams = int(suffixes[0][1:-1])
d = {}
with open(os.path.join(gramdir, fname), 'rt') as f:
keydiff.parse_keyfile(f, d, int)
gramdicts[grams] = d
def rare_grams(card, thresh = 2, grams = 2):
if not grams in gramdicts:
return None
rares = 0
gramdict = gramdicts[grams]
for line in card.text_lines_words:
for i in range(0, len(line) - (grams - 1)):
ngram = ' '.join([line[i + j] for j in range(0, grams)])
if ngram in gramdict:
if gramdict[ngram] < thresh:
rares += 1
else:
rares += 1
return rares
def list_only(l, items):
for e in l:
if not e in items:
return False
return True
def pct(x, total):
pctstr = 100.0 * float(x) / float(total)
return '(' + str(pctstr)[:5] + '%)'
def check_types(card):
if 'instant' in card.types:
return list_only(card.types, ['tribal', 'instant'])
if 'sorcery' in card.types:
return list_only(card.types, ['tribal', 'sorcery'])
if 'creature' in card.types:
return list_only(card.types, ['tribal', 'creature', 'artifact', 'land', 'enchantment'])
if 'planeswalker' in card.types:
return list_only(card.types, ['tribal', 'planeswalker', 'artifact', 'land', 'enchantment'])
else:
return list_only(card.types, ['tribal', 'artifact', 'land', 'enchantment'])
def check_pt(card):
if ('creature' in card.types or 'vehicle' in card.subtypes) or card.pt:
return ((('creature' in card.types or 'vehicle' in card.subtypes) and len(re.findall(re.escape('/'), card.pt)) == 1)
and not card.loyalty)
if 'planeswalker' in card.types or card.loyalty:
return (('planeswalker' in card.types and card.loyalty)
and not card.pt)
return None
def check_lands(card):
if 'land' in card.types:
return card.cost.format() == '_NOCOST_'
else:
return None
# doesn't handle granted activated abilities in ""
def check_X(card):
correct = None
incost = 'X' in card.cost.encode()
extra_cost_lines = 0
cost_lines = 0
use_lines = 0
for mt in card.text_lines:
sides = mt.text.split(':')
if len(sides) == 2:
actcosts = len(re.findall(re.escape(utils.reserved_mana_marker), sides[0]))
lcosts = mt.costs[:actcosts]
rcosts = mt.costs[actcosts:]
if 'X' in sides[0] or (utils.reserved_mana_marker in sides[0] and
'X' in ''.join(map(lambda c: c.encode(), lcosts))):
if incost:
return False # bad, duplicated Xs in costs
if 'X' in sides[1] or (utils.reserved_mana_marker in sides[1] and
'X' in ''.join(map(lambda c: c.encode(), rcosts))):
correct = True # good, defined X is either specified or used
if 'monstrosity' in sides[1]:
extra_cost_lines += 1
continue
elif 'remove X % counters' in sides[0] and 'each counter removed' in sides[1]:
correct = True # Blademane Baku
continue
elif 'note' in sides[1]:
correct = True # Ice Cauldron
continue
else:
return False # bad, defined X is unused
# we've checked all cases where an X occurs in an activation cost
linetext = mt.encode()
intext = len(re.findall(r'X', linetext))
defs = (len(re.findall(r'X is', linetext))
+ len(re.findall(re.escape('pay {X'), linetext))
+ len(re.findall(re.escape('pay X'), linetext))
+ len(re.findall(re.escape('reveal X'), linetext))
+ len(re.findall(re.escape('may tap X'), linetext)))
if incost:
if intext:
correct = True # defined and used or specified in some way
elif intext > 0:
if intext > 1 and defs > 0:
correct = True # look for multiples
elif 'suspend' in linetext or 'bloodthirst' in linetext:
correct = True # special case keywords
elif 'reinforce' in linetext and intext > 2:
correct = True # this should work
elif 'contain {X' in linetext or 'with {X' in linetext:
correct = True
elif ('additional cost' in linetext
or 'morph' in linetext
or 'kicker' in linetext):
cost_lines += 1
else:
use_lines += 1
if incost and not correct:
if 'sunburst' in card.text.text or 'spent to cast' in card.text.text:
return True # Engineered Explosives, Skyrider Elf
return False # otherwise we should have seen X somewhere if it was in the cost
elif cost_lines > 0 or use_lines > 0:
if (cost_lines + extra_cost_lines) == 1 and use_lines > 0:
return True # dreams, etc.
else:
return False
return correct
def check_kicker(card):
# also lazy and simple
if 'kicker' in card.text.text or 'kicked' in card.text.text:
# could also check for costs, at least make 'it's $ kicker,' not count as a kicker ability
newtext = card.text.text.replace(utils.reserved_mana_marker + ' kicker', '')
return 'kicker' in newtext and 'kicked' in newtext
else:
return None
def check_counters(card):
uses = len(re.findall(re.escape(utils.counter_marker), card.text.text))
if uses > 0:
return uses > 1 and 'countertype ' + utils.counter_marker in card.text.text
else:
return None
def check_choices(card):
bullets = len(re.findall(re.escape(utils.bullet_marker), card.text.text))
obracks = len(re.findall(re.escape(utils.choice_open_delimiter), card.text.text))
cbracks = len(re.findall(re.escape(utils.choice_close_delimiter), card.text.text))
if bullets + obracks + cbracks > 0:
if not (obracks == cbracks and bullets > 0):
return False
# could compile ahead of time
choice_regex = (re.escape(utils.choice_open_delimiter) + re.escape(utils.unary_marker)
+ r'.*' + re.escape(utils.bullet_marker) + r'.*'
+ re.escape(utils.choice_close_delimiter))
nochoices = re.sub(choice_regex, '', card.text.text)
nobullets = len(re.findall(re.escape(utils.bullet_marker), nochoices))
noobracks = len(re.findall(re.escape(utils.choice_open_delimiter), nochoices))
nocbracks = len(re.findall(re.escape(utils.choice_close_delimiter), nochoices))
return nobullets + noobracks + nocbracks == 0
else:
return None
def check_auras(card):
# a bit loose
if 'enchantment' in card.types or 'aura' in card.subtypes or 'enchant' in card.text.text:
return 'enchantment' in card.types or 'aura' in card.subtypes or 'enchant' in card.text.text
else:
return None
def check_equipment(card):
# probably even looser, could check for actual equip abilities and noncreatureness
if 'equipment' in card.subtypes:
return 'equip' in card.text.text
else:
return None
def check_vehicles(card):
if 'vehicle' in card.subtypes:
return 'crew' in card.text.text
else:
return None
def check_planeswalkers(card):
if 'planeswalker' in card.types:
good_lines = 0
bad_lines = 0
initial_re = r'^[+-]?' + re.escape(utils.unary_marker) + re.escape(utils.unary_counter) + '*:'
initial_re_X = r'^[-+]' + re.escape(utils.x_marker) + '+:'
for line in card.text_lines:
if len(re.findall(initial_re, line.text)) == 1:
good_lines += 1
elif len(re.findall(initial_re_X, line.text)) == 1:
good_lines += 1
elif 'can be your commander' in line.text:
pass
elif 'countertype' in line.text or 'transform' in line.text:
pass
else:
bad_lines += 1
return good_lines > 1 and bad_lines == 0
else:
return None
def check_levelup(card):
if 'level' in card.text.text:
uplines = 0
llines = 0
for line in card.text_lines:
if 'countertype ' + utils.counter_marker + ' level' in line.text:
uplines += 1
llines += 1
elif 'with level up' in line.text:
llines += 1
elif 'level up' in line.text:
uplines += 1
elif 'level' in line.text:
llines += 1
return uplines == 1 and llines > 0
else:
return None
def check_activated(card):
activated = 0
for line in card.text_lines:
if '.' in line.text:
subtext = re.sub(r'"[^"]*"', '', line.text)
if 'forecast' in subtext:
pass
elif 'return ' + utils.this_marker + ' from your graveyard' in subtext:
pass
elif 'on the stack' in subtext:
pass
elif ':' in subtext:
activated += 1
if activated > 0:
return list_only(card.types, ['creature', 'land', 'artifact', 'enchantment', 'planeswalker', 'tribal'])
else:
return None
def check_triggered(card):
triggered = 0
triggered_2 = 0
for line in card.text_lines:
if 'when ' + utils.this_marker + ' enters the battlefield' in line.text:
triggered += 1
if 'when ' + utils.this_marker + ' leaves the battlefield' in line.text:
triggered += 1
if 'when ' + utils.this_marker + ' dies' in line.text:
triggered += 1
elif 'at the beginning' == line.text[:16] or 'when' == line.text[:4]:
if 'from your graveyard' in line.text:
triggered_2 += 1
elif 'in your graveyard' in line.text:
triggered_2 += 1
elif 'if ' + utils.this_marker + ' is suspended' in line.text:
triggered_2 += 1
elif 'if that card is exiled' in line.text or 'if ' + utils.this_marker + ' is exiled' in line.text:
triggered_2 += 1
elif 'when the creature ' + utils.this_marker + ' haunts' in line.text:
triggered_2 += 1
elif 'when you cycle ' + utils.this_marker in line.text or 'when you cast ' + utils.this_marker in line.text:
triggered_2 += 1
elif 'this turn' in line.text or 'this combat' in line.text or 'your next upkeep' in line.text:
triggered_2 += 1
elif 'from your library' in line.text:
triggered_2 += 1
elif 'you discard ' + utils.this_marker in line.text or 'you to discard ' + utils.this_marker in line.text:
triggered_2 += 1
else:
triggered += 1
if triggered > 0:
return list_only(card.types, ['creature', 'land', 'artifact', 'enchantment', 'planeswalker', 'tribal'])
elif triggered_2:
return True
else:
return None
def check_chosen(card):
if 'chosen' in card.text.text:
return ('choose' in card.text.text
or 'chosen at random' in card.text.text
or 'name' in card.text.text
or 'is chosen' in card.text.text
or 'search' in card.text.text)
else:
return None
def check_shuffle(card):
retval = None
# sadly, this does not detect spurious shuffling
for line in card.text_lines:
if 'search' in line.text and 'library' in line.text:
thisval = ('shuffle' in line.text
or 'searches' in line.text
or 'searched' in line.text
or 'searching' in line.text
or 'rest' in line.text
or 'instead' in line.text)
if retval is None:
retval = thisval
else:
retval = retval and thisval
return retval
def check_quotes(card):
retval = None
for line in card.text_lines:
quotes = len(re.findall(re.escape('"'), line.text))
# HACK: the '" pattern in the training set is actually incorrect
quotes += len(re.findall(re.escape('\'"'), line.text))
if quotes > 0:
thisval = quotes % 2 == 0
if retval is None:
retval = thisval
else:
retval = retval and thisval
return retval
props = OrderedDict([
('types', check_types),
('pt', check_pt),
('lands', check_lands),
('X', check_X),
('kicker', check_kicker),
('counters', check_counters),
('choices', check_choices),
('quotes', check_quotes),
('auras', check_auras),
('equipment', check_equipment),
('vehicles', check_vehicles),
('planeswalkers', check_planeswalkers),
('levelup', check_levelup),
('chosen', check_chosen),
('shuffle', check_shuffle),
('activated', check_activated),
('triggered', check_triggered),
])
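# Illustrative note (added for clarity, not part of the original script): every check
# above follows the same convention -- True means the property holds for the card,
# False means it is violated, and None means it does not apply; process_props() below
# only tallies non-None results. A hypothetical new check would look like:
#
# def check_defender(card):
#     if 'defender' in card.text.text:
#         return 'creature' in card.types   # (toy rule, for illustration only)
#     else:
#         return None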
def process_props(cards, dump = False, uncovered = False):
total_all = 0
total_good = 0
total_bad = 0
total_uncovered = 0
values = OrderedDict([(k, (0,0,0)) for k in props])
for card in cards:
total_all += 1
overall = True
any_prop = False
for prop in props:
(total, good, bad) = values[prop]
this_prop = props[prop](card)
if not this_prop is None:
total += 1
if not prop == 'types':
any_prop = True
if this_prop:
good += 1
else:
bad += 1
overall = False
if card.name not in ['demonic pact', 'lavaclaw reaches',
"ertai's trickery", 'rumbling aftershocks', # i hate these
] and dump:
print('---- ' + prop + ' ----')
print(card.encode())
print(card.format())
values[prop] = (total, good, bad)
if overall:
total_good += 1
else:
total_bad += 1
if not any_prop:
total_uncovered += 1
if uncovered:
print('---- uncovered ----')
print(card.encode())
print(card.format())
return ((total_all, total_good, total_bad, total_uncovered),
values)
def main(fname, oname = None, verbose = False, dump = False):
# may need to set special arguments here
cards = jdecode.mtg_open_file(fname, verbose=verbose)
do_grams = False
if do_grams:
rg = {}
for card in cards:
g = rare_grams(card, thresh=2, grams=2)
if len(card.text_words) > 0:
g = int(1.0 + (float(g) * 100.0 / float(len(card.text_words))))
if g in rg:
rg[g] += 1
else:
rg[g] = 1
if g >= 60:
print g
print card.format()
tot = 0
vmax = sum(rg.values())
pct90 = None
pct95 = None
pct99 = None
for i in sorted(rg):
print str(i) + ' rare ngrams: ' + str(rg[i])
tot += rg[i]
if pct90 is None and tot >= vmax * 0.90:
pct90 = i
if pct95 is None and tot >= vmax * 0.95:
pct95 = i
if pct99 is None and tot >= vmax * 0.99:
pct99 = i
print '90% - ' + str(pct90)
print '95% - ' + str(pct95)
print '99% - ' + str(pct99)
else:
((total_all, total_good, total_bad, total_uncovered),
values) = process_props(cards, dump=dump)
# summary
print('-- overall --')
print(' total : ' + str(total_all))
print(' good : ' + str(total_good) + ' ' + pct(total_good, total_all))
print(' bad : ' + str(total_bad) + ' ' + pct(total_bad, total_all))
print(' uncovered : ' + str(total_uncovered) + ' ' + pct(total_uncovered, total_all))
print('----')
# breakdown
for prop in props:
(total, good, bad) = values[prop]
print(prop + ':')
print(' total: ' + str(total) + ' ' + pct(total, total_all))
print(' good : ' + str(good) + ' ' + pct(good, total_all))
print(' bad : ' + str(bad) + ' ' + pct(bad, total_all))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('infile', #nargs='?'. default=None,
help='encoded card file or json corpus to process')
parser.add_argument('outfile', nargs='?', default=None,
help='name of output file, will be overwritten')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
parser.add_argument('-d', '--dump', action='store_true',
help='print invalid cards')
args = parser.parse_args()
main(args.infile, args.outfile, verbose=args.verbose, dump=args.dump)
exit(0)
|
|
# -*- coding: utf-8 -*-
from functools import update_wrapper
import pprint
import urlparse
from cms import constants
from cms.exceptions import CMSDeprecationWarning
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
import os
import warnings
__all__ = ['get_cms_setting']
class VERIFIED: pass # need a unique identifier for CMS_LANGUAGES
def default(name):
def decorator(wrapped):
def wrapper():
if hasattr(settings, name):
return getattr(settings, name)
return wrapped()
update_wrapper(wrapper, wrapped)
return wrapper
return decorator
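# Illustrative sketch (added for clarity, not part of the original module): the
# decorator lets a project setting override the computed default, e.g.
#
#   @default('CMS_MEDIA_ROOT')
#   def get_media_root():
#       ...
#
# returns settings.CMS_MEDIA_ROOT when it is defined and falls back to the wrapped
# function otherwise.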
DEFAULTS = {
'TEMPLATE_INHERITANCE': True,
'PLACEHOLDER_CONF': {},
'PERMISSION': False,
# Whether to use raw ID lookups for users when PERMISSION is True
'RAW_ID_USERS': False,
'PUBLIC_FOR': 'all',
'CONTENT_CACHE_DURATION': 60,
'SHOW_START_DATE': False,
'SHOW_END_DATE': False,
'URL_OVERWRITE': True,
'MENU_TITLE_OVERWRITE': False,
'REDIRECTS': False,
'SEO_FIELDS': False,
'APPHOOKS': [],
'SOFTROOT': False,
'SITE_CHOICES_CACHE_KEY': 'CMS:site_choices',
'PAGE_CHOICES_CACHE_KEY': 'CMS:page_choices',
'MEDIA_PATH': 'cms/',
'PAGE_MEDIA_PATH': 'cms_page_media/',
'TITLE_CHARACTER': '+',
'CACHE_PREFIX': 'cms-',
'PLUGIN_PROCESSORS': [],
'PLUGIN_CONTEXT_PROCESSORS': [],
'UNIHANDECODE_VERSION': None,
'UNIHANDECODE_DECODERS': ['ja', 'zh', 'kr', 'vn', 'diacritic'],
'UNIHANDECODE_DEFAULT_DECODER': 'diacritic',
'MAX_PAGE_PUBLISH_REVERSIONS': 25,
}
def get_cache_durations():
return {
'menus': getattr(settings, 'MENU_CACHE_DURATION', 60 * 60),
'content': get_cms_setting('CONTENT_CACHE_DURATION'),
'permissions': 60 * 60,
}
@default('CMS_MEDIA_ROOT')
def get_media_root():
return os.path.join(settings.MEDIA_ROOT, get_cms_setting('MEDIA_PATH'))
@default('CMS_MEDIA_URL')
def get_media_url():
return urlparse.urljoin(settings.MEDIA_URL, get_cms_setting('MEDIA_PATH'))
@default('PLACEHOLDER_FRONTEND_EDITING')
def get_placeholder_frontend_editing():
return True
def get_templates():
templates = list(getattr(settings, 'CMS_TEMPLATES', []))
if get_cms_setting('TEMPLATE_INHERITANCE'):
templates.append((constants.TEMPLATE_INHERITANCE_MAGIC, _('Inherit the template of the nearest ancestor')))
return templates
def _ensure_languages_settings_new(languages):
valid_language_keys = ['code', 'name', 'fallbacks', 'hide_untranslated', 'redirect_on_fallback', 'public']
required_language_keys = ['code', 'name']
simple_defaults = ['public', 'redirect_on_fallback', 'hide_untranslated']
defaults = languages.pop('default', {})
default_fallbacks = defaults.get('fallbacks')
needs_fallbacks = []
for key in defaults:
if key not in valid_language_keys:
raise ImproperlyConfigured("CMS_LANGUAGES has an invalid property in the default properties: s" % key)
for key in simple_defaults:
if key not in defaults:
defaults[key] = True
for site, language_list in languages.items():
if not isinstance(site, int):
raise ImproperlyConfigured(
"CMS_LANGUAGES can only be filled with integers (site IDs) and 'default'"
" for default values. %s is not a valid key." % site)
for language_object in language_list:
for required_key in required_language_keys:
if required_key not in language_object:
raise ImproperlyConfigured("CMS_LANGUAGES has a language which is missing the required key %r "
"in site %r" % (key, site))
language_code = language_object['code']
for key in language_object:
if key not in valid_language_keys:
raise ImproperlyConfigured(
"CMS_LANGUAGES has invalid key %r in language %r in site %r" % (key, language_code, site)
)
if 'fallbacks' not in language_object:
if default_fallbacks:
language_object['fallbacks'] = default_fallbacks
else:
needs_fallbacks.append((site, language_object))
for key in simple_defaults:
if key not in language_object:
language_object[key] = defaults[key]
site_fallbacks = {}
for site, language_object in needs_fallbacks:
if site not in site_fallbacks:
site_fallbacks[site] = [lang['code'] for lang in languages[site] if lang['public']]
language_object['fallbacks'] = [lang_code for lang_code in site_fallbacks[site] if
lang_code != language_object['code']]
languages['default'] = defaults
return languages
def _get_old_language_conf(code, name, template):
language = template.copy()
language['code'] = code
language['name'] = name
default_fallbacks = dict(settings.CMS_LANGUAGES).keys()
if hasattr(settings, 'CMS_LANGUAGE_FALLBACK'):
if settings.CMS_LANGUAGE_FALLBACK:
if hasattr(settings, 'CMS_LANGUAGE_CONF'):
language['fallbacks'] = settings.CMS_LANGUAGE_CONF.get(code, default_fallbacks)
else:
language['fallbacks'] = default_fallbacks
else:
language['fallbacks'] = []
else:
if hasattr(settings, 'CMS_LANGUAGE_CONF'):
language['fallbacks'] = settings.CMS_LANGUAGE_CONF.get(code, default_fallbacks)
else:
language['fallbacks'] = default_fallbacks
if hasattr(settings, 'CMS_FRONTEND_LANGUAGES'):
language['public'] = code in settings.CMS_FRONTEND_LANGUAGES
return language
def _translate_legacy_languages_settings(languages):
new_languages = {}
lang_template = {'fallbacks': [], 'public': True, 'redirect_on_fallback': True,
'hide_untranslated': getattr(settings, 'CMS_HIDE_UNTRANSLATED', False)}
codes = dict(languages)
for site, site_languages in getattr(settings, 'CMS_SITE_LANGUAGES', {1: languages}).items():
new_languages[site] = []
for site_language in site_languages:
if site_language in codes:
new_languages[site].append(_get_old_language_conf(site_language, codes[site_language], lang_template))
pp = pprint.PrettyPrinter(indent=4)
warnings.warn("CMS_LANGUAGES has changed in django-cms 2.4\n"
"You may replace CMS_LANGUAGES with the following:\n%s" % pp.pformat(new_languages),
CMSDeprecationWarning)
new_languages['default'] = lang_template.copy()
return new_languages
def _ensure_languages_settings(languages):
if isinstance(languages, dict):
verified_languages = _ensure_languages_settings_new(languages)
else:
verified_languages = _translate_legacy_languages_settings(languages)
verified_languages[VERIFIED] = True # this will be busted by SettingsOverride and cause a re-check
return verified_languages
def get_languages():
if not settings.USE_I18N:
return _ensure_languages_settings(
{settings.SITE_ID: [{'code': settings.LANGUAGE_CODE, 'name': settings.LANGUAGE_CODE}]})
languages = getattr(settings, 'CMS_LANGUAGES', {
settings.SITE_ID: [{'code': code, 'name': _(name)} for code, name in settings.LANGUAGES]
})
if VERIFIED in languages:
return languages
return _ensure_languages_settings(languages)
def get_unihandecode_host():
host = getattr(settings, 'CMS_UNIHANDECODE_HOST', None)
if not host:
return host
if host.endswith('/'):
return host
else:
return host + '/'
COMPLEX = {
'CACHE_DURATIONS': get_cache_durations,
'MEDIA_ROOT': get_media_root,
'MEDIA_URL': get_media_url,
# complex because not prefixed by CMS_
'PLACEHOLDER_FRONTEND_EDITING': get_placeholder_frontend_editing,
'TEMPLATES': get_templates,
'LANGUAGES': get_languages,
'UNIHANDECODE_HOST': get_unihandecode_host,
}
def get_cms_setting(name):
if name in COMPLEX:
return COMPLEX[name]()
else:
return getattr(settings, 'CMS_%s' % name, DEFAULTS[name])
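# Illustrative usage sketch (added for clarity, not part of the original module):
#
#   get_cms_setting('TEMPLATE_INHERITANCE')   # settings.CMS_TEMPLATE_INHERITANCE or the default True
#   get_cms_setting('LANGUAGES')              # routed through COMPLEX -> get_languages()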
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._certificates_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_resource_group_request, build_list_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
"""CertificatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.CertificateCollection"]:
"""Get all certificates for a subscription.
Description for Get all certificates for a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CertificateCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.CertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CertificateCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'} # type: ignore
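# Illustrative usage sketch (added for clarity, not part of the generated code);
# the client name below is hypothetical and assumes the usual azure-mgmt-web setup:
#
#   async for certificate in web_client.certificates.list():
#       print(certificate.name)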
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CertificateCollection"]:
"""Get all certificates in a resource group.
Description for Get all certificates in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CertificateCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.CertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CertificateCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.Certificate":
"""Get a certificate.
Description for Get a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
name: str,
certificate_envelope: "_models.Certificate",
**kwargs: Any
) -> "_models.Certificate":
"""Create or update a certificate.
Description for Create or update a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:param certificate_envelope: Details of certificate, if it exists already.
:type certificate_envelope: ~azure.mgmt.web.v2020_09_01.models.Certificate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(certificate_envelope, 'Certificate')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete a certificate.
Description for Delete a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
name: str,
certificate_envelope: "_models.CertificatePatchResource",
**kwargs: Any
) -> "_models.Certificate":
"""Create or update a certificate.
Description for Create or update a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:param certificate_envelope: Details of certificate, if it exists already.
:type certificate_envelope: ~azure.mgmt.web.v2020_09_01.models.CertificatePatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(certificate_envelope, 'CertificatePatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
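# ------------------------------------------------------------------
# Illustrative usage (not part of the generated client): a minimal,
# hypothetical sketch of driving these async certificate operations,
# assuming the aio management client exposes this operations class as
# `client.certificates` and that azure-identity is installed.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.web.aio import WebSiteManagementClient
#
#   async def patch_then_delete(subscription_id, group, cert_name, patch):
#       async with DefaultAzureCredential() as credential:
#           async with WebSiteManagementClient(credential, subscription_id) as client:
#               updated = await client.certificates.update(group, cert_name, patch)
#               await client.certificates.delete(group, cert_name)
#               return updated
# ------------------------------------------------------------------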
|
|
# -*- coding: utf-8 -*-
"""
migrated from TheCannon package
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import sys
from corner import corner
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rc
# from astropy.table import Table
from .continuum_normalization import (_cont_norm_gaussian_smooth,
_cont_norm_running_quantile,
_cont_norm_running_quantile_regions,
_find_cont_fitfunc,
_find_cont_fitfunc_regions,
_cont_norm,
_cont_norm_regions)
from .find_continuum_pixels import _find_contpix, _find_contpix_regions
rc('text', usetex=True)
rc('font', family='serif')
# if python3
PY3 = sys.version_info[0] > 2
if PY3:
basestring = (str, bytes)
else:
basestring = (str, unicode)
class Dataset(object):
""" A class to represent Cannon input: a dataset of spectra and labels """
def __init__(self, wl, tr_ID, tr_flux, tr_ivar, tr_label, test_ID, test_flux, test_ivar):
print("Loading dataset")
print("This may take a while...")
self.wl = wl
self.tr_ID = tr_ID
self.tr_flux = tr_flux
self.tr_ivar = tr_ivar
self.tr_label = tr_label
self.test_ID = test_ID
self.test_flux = test_flux
self.test_ivar = test_ivar
self.ranges = None
self._label_names = None  # unset until set_label_names() is called; checked by get_plotting_labels()
# calculate SNR
self.tr_SNR = np.array(
[self._SNR(*s) for s in zip(tr_flux, tr_ivar)])
self.test_SNR = np.array(
[self._SNR(*s) for s in zip(test_flux, test_ivar)])
def _SNR(self, flux, ivar):
""" Calculate the SNR of a spectrum, ignoring bad pixels
Parameters
----------
flux: numpy ndarray
pixel intensities
ivar: numpy ndarray
inverse variances corresponding to flux
Returns
-------
SNR: float
"""
take = ivar != 0
SNR = float(np.median(flux[take]*(ivar[take]**0.5)))
return SNR
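# Example (illustrative): for flux = [1.0, 2.0, 0.5] and ivar = [4.0, 0.0, 16.0],
# the zero-ivar pixel is skipped and SNR = median([1.0*sqrt(4), 0.5*sqrt(16)]) = 2.0.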
def set_label_names(self, names):
""" Set the label names for plotting
Parameters
----------
names: ndarray or list
The names of the labels used for plotting, ex. in LaTeX syntax
"""
self._label_names = names
def get_plotting_labels(self):
""" Return the label names used make plots
Returns
-------
label_names: ndarray
The label names
"""
if self._label_names is None:
print("No label names yet!")
return None
else:
return self._label_names
def smooth_dataset(self):
""" Bins down all of the spectra and updates the dataset """
output = smooth_spectra(self.wl, self.tr_flux, self.tr_ivar)
self.wl = output[:,0,:]
self.tr_flux = output[:,1,:]
self.tr_ivar = output[:,2,:]
output = smooth_spectra(self.wl, self.test_flux, self.test_ivar)
self.test_flux = output[:,1,:]
self.test_ivar = output[:,2,:]
def diagnostics_SNR(self, figname="SNRdist.png"):
""" Plots SNR distributions of ref and test object spectra
Parameters
----------
(optional) figname: string
Filename to use for the output saved plot
"""
print("Diagnostic for SNRs of reference and survey objects")
data = self.test_SNR
plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, facecolor='r',
label="Survey Objects")
data = self.tr_SNR
plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, color='b',
label="Ref Objects")
plt.legend(loc='upper right')
#plt.xscale('log')
plt.title("SNR Comparison Between Reference and Survey Objects")
#plt.xlabel("log(Formal SNR)")
plt.xlabel("Formal SNR")
plt.ylabel("Number of Objects")
plt.savefig(figname)
plt.close()
print("Saved fig %s" %figname)
def diagnostics_ref_labels(self, figname="ref_labels_triangle.png"):
""" Plots all training labels against each other
Parameters
----------
(optional) figname: string
Filename of the saved output plot
"""
self._label_triangle_plot(self.tr_label, figname)
def _label_triangle_plot(self, label_vals, figname):
"""Make a triangle plot for the selected labels
Parameters
----------
label_vals: numpy ndarray
values of the labels
figname: str
if provided, save the figure into the given file
"""
labels = [r"$%s$" % l for l in self.get_plotting_labels()]
print("Plotting every label against every other")
fig = corner(label_vals, labels=labels, show_titles=True,
title_kwargs={"fontsize": 12})  # corner expects title_kwargs; title_args was the old triangle.py name
fig.savefig(figname)
print("Saved fig %s" % figname)
plt.close(fig)
def make_contmask(self, fluxes, ivars, frac):
""" Identify continuum pixels using training spectra
Does this for each region of the spectrum if dataset.ranges is not None
Parameters
----------
fluxes: ndarray
Flux data values
ivars: ndarray
Inverse variances corresponding to flux data values
frac: float
The fraction of pixels that should be identified as continuum
Returns
-------
contmask: ndarray
Mask with True indicating that the pixel is continuum
"""
print("Finding continuum pixels...")
if self.ranges is None:
print("assuming continuous spectra")
contmask = _find_contpix(self.wl, fluxes, ivars, frac)
else:
print("taking spectra in %s regions" %len(self.ranges))
contmask = _find_contpix_regions(
self.wl, fluxes, ivars, frac, self.ranges)
print("%s pixels returned as continuum" %sum(contmask))
return contmask
def set_continuum(self, contmask):
""" Set the contmask attribute
Parameters
----------
contmask: ndarray
Mask with True indicating that the pixel is continuum
"""
self.contmask = contmask
def fit_continuum(self, deg, ffunc):
""" Fit a continuum to the continuum pixels
Parameters
----------
deg: int
Degree of the fitting function
ffunc: str
Type of fitting function, 'sinusoid' or 'chebyshev'
Returns
-------
tr_cont: ndarray
Flux values corresponding to the fitted continuum of training objects
test_cont: ndarray
Flux values corresponding to the fitted continuum of test objects
"""
print("Fitting Continuum...")
if self.ranges is None:
tr_cont = _find_cont_fitfunc(
self.tr_flux, self.tr_ivar, self.contmask, deg, ffunc)
test_cont = _find_cont_fitfunc(
self.test_flux, self.test_ivar, self.contmask, deg, ffunc)
else:
print("Fitting Continuum in %s Regions..." %len(self.ranges))
tr_cont = _find_cont_fitfunc_regions(self.tr_flux, self.tr_ivar,
self.contmask, deg, self.ranges, ffunc)
test_cont = _find_cont_fitfunc_regions(
self.test_flux, self.test_ivar,
self.contmask, deg, self.ranges, ffunc)
return tr_cont, test_cont
def continuum_normalize_training_q(self, q, delta_lambda):
""" Continuum normalize the training set using a running quantile
Parameters
----------
q: float
The quantile cut
delta_lambda: float
The width of the pixel range used to calculate the median
"""
print("Continuum normalizing the tr set using running quantile...")
if self.ranges is None:
return _cont_norm_running_quantile(
self.wl, self.tr_flux, self.tr_ivar,
q=q, delta_lambda=delta_lambda)
else:
return _cont_norm_running_quantile_regions(
self.wl, self.tr_flux, self.tr_ivar,
q=q, delta_lambda=delta_lambda, ranges=self.ranges)
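# Example call (illustrative; q and delta_lambda are hypothetical values, not defaults):
#   norm_tr_flux, norm_tr_ivar = ds.continuum_normalize_training_q(q=0.90, delta_lambda=50)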
def continuum_normalize(self, cont):
"""
Continuum normalize spectra, in chunks if spectrum has regions
Parameters
----------
cont: ndarray
Flux values corresponding to the continuum
Returns
-------
norm_tr_flux: ndarray
Normalized flux values for the training objects
norm_tr_ivar: ndarray
Rescaled inverse variance values for the training objects
norm_test_flux: ndarray
Normalized flux values for the test objects
norm_test_ivar: numpy ndarray
Rescaled inverse variance values for the test objects
"""
tr_cont, test_cont = cont
if self.ranges is None:
print("assuming continuous spectra")
norm_tr_flux, norm_tr_ivar = _cont_norm(
self.tr_flux, self.tr_ivar, tr_cont)
norm_test_flux, norm_test_ivar = _cont_norm(
self.test_flux, self.test_ivar, test_cont)
else:
print("taking spectra in %s regions" %(len(self.ranges)))
norm_tr_flux, norm_tr_ivar = _cont_norm_regions(
self.tr_flux, self.tr_ivar, tr_cont, self.ranges)
norm_test_flux, norm_test_ivar = _cont_norm_regions(
self.test_flux, self.test_ivar, test_cont, self.ranges)
return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar
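# Typical normalization workflow (illustrative sketch; `ds` is a Dataset instance and
# frac/deg/ffunc are example values, not prescribed defaults):
#   contmask = ds.make_contmask(ds.tr_flux, ds.tr_ivar, frac=0.07)
#   ds.set_continuum(contmask)
#   cont = ds.fit_continuum(deg=3, ffunc="sinusoid")
#   norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = ds.continuum_normalize(cont)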
def continuum_normalize_gaussian_smoothing(self, L):
""" Continuum normalize using a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
"""
norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
_cont_norm_gaussian_smooth(self, L)
self.tr_flux = norm_tr_flux
self.tr_ivar = norm_tr_ivar
self.test_flux = norm_test_flux
self.test_ivar = norm_test_ivar
def diagnostics_test_step_flagstars(self):
"""
Write files listing stars whose inferred labels lie outside 2 standard deviations from the reference label space
"""
label_names = self.get_plotting_labels()
nlabels = len(label_names)
reference_labels = self.tr_label
test_labels = self.test_label_vals
test_IDs = np.array(self.test_ID)
mean = np.mean(reference_labels, 0)
stdev = np.std(reference_labels, 0)
lower = mean - 2 * stdev
upper = mean + 2 * stdev
for i in range(nlabels):
label_name = label_names[i]
test_vals = test_labels[:,i]
warning = np.logical_or(test_vals < lower[i], test_vals > upper[i])
filename = "flagged_stars_%s.txt" % i
with open(filename, 'w') as output:
for star in test_IDs[warning]:
output.write('{0:s}\n'.format(star))
print("Reference label %s" % label_name)
print("flagged %s stars beyond 2-sig of ref labels" % sum(warning))
print("Saved list %s" % filename)
def diagnostics_survey_labels(self, figname="survey_labels_triangle.png"):
""" Plot all survey labels against each other
Parameters
----------
(optional) figname: str
Filename of saved output plot
"""
self._label_triangle_plot(self.test_label_vals, figname)
def diagnostics_1to1(self, figname="1to1_label"):
""" Plots survey labels vs. training labels, color-coded by survey SNR """
snr = self.test_SNR
label_names = self.get_plotting_labels()
nlabels = len(label_names)
reference_labels = self.tr_label
test_labels = self.test_label_vals
for i in range(nlabels):
name = label_names[i]
orig = reference_labels[:,i]
cannon = test_labels[:,i]
# calculate bias and scatter
scatter = np.round(np.std(orig-cannon),5)
bias = np.round(np.mean(orig-cannon),5)
low = np.minimum(min(orig), min(cannon))
high = np.maximum(max(orig), max(cannon))
fig = plt.figure(figsize=(10,6))
gs = gridspec.GridSpec(1,2,width_ratios=[2,1], wspace=0.3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot([low, high], [low, high], 'k-', linewidth=2.0, label="x=y")
ax1.set_xlim(low, high)
ax1.set_ylim(low, high)
ax1.legend(fontsize=14, loc='lower right')
pl = ax1.scatter(orig, cannon, marker='x', c=snr,
vmin=50, vmax=200, alpha=0.7)
cb = plt.colorbar(pl, ax=ax1, orientation='horizontal')
cb.set_label('SNR from Test Set', fontsize=12)
textstr = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,
fontsize=14, verticalalignment='top')
ax1.tick_params(axis='x', labelsize=14)
ax1.tick_params(axis='y', labelsize=14)
ax1.set_xlabel("Reference Value", fontsize=14)
ax1.set_ylabel("Cannon Test Value", fontsize=14)
ax1.set_title("1-1 Plot of Label " + r"$%s$" % name)
diff = cannon-orig
npoints = len(diff)
mu = np.mean(diff)
sig = np.std(diff)
ax2.hist(diff, orientation='horizontal')  # horizontal so the difference runs along y, matching axhline(y=0) and the axis labels
#ax2.hist(diff, range=[-3*sig,3*sig], color='k', bins=np.sqrt(npoints),
# orientation='horizontal', alpha=0.3, histtype='stepfilled')
ax2.tick_params(axis='x', labelsize=14)
ax2.tick_params(axis='y', labelsize=14)
ax2.set_xlabel("Count", fontsize=14)
ax2.set_ylabel("Difference", fontsize=14)
ax2.axhline(y=0, c='k', lw=3, label='Difference=0')
ax2.set_title("Training Versus Test Labels for $%s$" %name,
fontsize=14)
ax2.legend(fontsize=14)
figname_full = "%s_%s.png" %(figname, i)
plt.savefig(figname_full)
print("Diagnostic for label output vs. input")
print("Saved fig %s" % figname_full)
plt.close()
def set_test_label_vals(self, vals):
""" Set test label values
Parameters
----------
vals: ndarray
Test label values
"""
self.test_label_vals = vals
def diagnostics_best_fit_spectra(self, *args, **kwargs):
""" Plot results of best-fit spectra for ten random test objects """
# overlay_spectra(model, self)
print('-------------------------------------------------------------')
print('@Cham: This method is deprecated due to incomplete migration')
print('@Cham: I am so sorry about that ... ')
print('-------------------------------------------------------------')
return None
# ###################################
# some general methods are moved here
# ###################################
def bin_flux(flux, ivar):
""" bin two neighboring flux values """
if np.sum(ivar)==0:
return np.sum(flux)/2.
return np.average(flux, weights=ivar)
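# Example (illustrative): bin_flux(np.array([1.0, 3.0]), np.array([1.0, 3.0])) gives the
# ivar-weighted average (1*1 + 3*3)/(1 + 3) = 2.5, while an all-zero ivar pair such as
# bin_flux(np.array([1.0, 3.0]), np.array([0.0, 0.0])) falls back to the plain mean 2.0.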
def smooth_spectrum(wl, flux, ivar):
""" Bins down one spectrum
Parameters
----------
wl: numpy ndarray
wavelengths
flux: numpy ndarray
flux values
ivar: numpy ndarray
inverse variances associated with fluxes
Returns
-------
wl: numpy ndarray
updated binned pixel wavelengths
flux: numpy ndarray
updated binned flux values
ivar: numpy ndarray
updated binned inverse variances
"""
# if odd, discard the last point
if len(wl)%2 == 1:
wl = np.delete(wl, -1)
flux = np.delete(flux, -1)
ivar = np.delete(ivar, -1)
wl = wl.reshape(-1,2)
ivar = ivar.reshape(-1,2)
flux = flux.reshape(-1,2)
wl_binned = np.mean(wl, axis=1)
ivar_binned = np.sqrt(np.sum(ivar**2, axis=1))
flux_binned = np.array([bin_flux(f,w) for f,w in zip(flux, ivar)])
return wl_binned, flux_binned, ivar_binned
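# Example (illustrative): an odd-length spectrum drops its last pixel before binning,
# so wl = [1, 2, 3, 4, 5] is trimmed to [1, 2, 3, 4] and binned to wl_binned = [1.5, 3.5].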
def smooth_spectra(wl, fluxes, ivars):
""" Bins down a block of spectra """
output = np.asarray(
[smooth_spectrum(wl, flux, ivar) for flux,ivar in zip(fluxes, ivars)])
return output
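# ------------------------------------------------------------------
# Illustrative end-to-end sketch (hypothetical arrays; the module uses
# relative imports and is meant to be imported as part of the package,
# so this is a usage outline rather than a runnable script):
#
#   ds = Dataset(wl, tr_ID, tr_flux, tr_ivar, tr_label,
#                test_ID, test_flux, test_ivar)
#   ds.set_label_names([r"T_{\rm eff}", r"\log g", r"[Fe/H]"])
#   ds.diagnostics_SNR()
#   ds.diagnostics_ref_labels()
# ------------------------------------------------------------------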
|